[llvm] 6d103ca - [SelectionDAG] Unify scalarizeVectorLoad and VectorLegalizer::ExpandLoad

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat May 2 15:18:45 PDT 2020


Author: LemonBoy
Date: 2020-05-02T15:18:10-07:00
New Revision: 6d103ca855552054f34849ecf0743bb39cebc8a9

URL: https://github.com/llvm/llvm-project/commit/6d103ca855552054f34849ecf0743bb39cebc8a9
DIFF: https://github.com/llvm/llvm-project/commit/6d103ca855552054f34849ecf0743bb39cebc8a9.diff

LOG: [SelectionDAG] Unify scalarizeVectorLoad and VectorLegalizer::ExpandLoad

The two code paths share the same goal: legalizing a load of a non-byte-sized vector by loading its "flattened" in-memory representation, slicing out each individual element, and then building a vector out of those pieces.

The technique employed by `ExpandLoad` is slightly more convoluted and produces slightly better codegen on ARM, AMDGPU, and x86, but it suffers from some bugs (D78480) and is wrong for big-endian (BE) machines.
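
As an illustration only (this is not the actual SelectionDAG code), the standalone sketch below models the shift-and-mask slicing that the unified scalarizeVectorLoad path performs for a little-endian <4 x i5> load: the whole store-sized integer is loaded, and each 5-bit lane is shifted into place, masked, and sign-extended. The helper names (loadPackedI5x4, signExtend) and the element values are made up for the example.

    // Standalone model of the bit-slicing legalization of a <4 x i5> load.
    #include <cstdint>
    #include <cstdio>

    // Sign-extend the low `Bits` bits of `V`, mirroring the shift pair used
    // for a sign-extending load of a sub-byte element.
    static int32_t signExtend(uint32_t V, unsigned Bits) {
      uint32_t Mask = (1u << Bits) - 1;
      uint32_t SignBit = 1u << (Bits - 1);
      V &= Mask;                              // AND with the element bit mask
      return (int32_t)((V ^ SignBit) - SignBit);
    }

    // Load a <4 x i5> packed little-endian into 3 bytes of memory and return
    // each element sign-extended to i32.
    static void loadPackedI5x4(const uint8_t *Mem, int32_t Out[4]) {
      // Extending load of the whole store-sized integer (20 bits stored in 24).
      uint32_t Wide =
          Mem[0] | ((uint32_t)Mem[1] << 8) | ((uint32_t)Mem[2] << 16);
      for (unsigned Idx = 0; Idx < 4; ++Idx) {
        uint32_t Shifted = Wide >> (Idx * 5); // shift right by Idx * EltBits
        Out[Idx] = signExtend(Shifted, 5);    // mask + extend to the result type
      }
    }

    int main() {
      // Pack {3, -1, 7, -8} as 5-bit fields: -1 -> 31, -8 -> 24 (two's complement).
      uint32_t Packed = 3u | (31u << 5) | (7u << 10) | (24u << 15);
      uint8_t Mem[3] = {(uint8_t)Packed, (uint8_t)(Packed >> 8),
                        (uint8_t)(Packed >> 16)};
      int32_t Out[4];
      loadPackedI5x4(Mem, Out);
      for (int V : Out)
        std::printf("%d\n", V); // prints 3, -1, 7, -8
      return 0;
    }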

Differential Revision: https://reviews.llvm.org/D79096

Added: 
    llvm/test/CodeGen/X86/load-local-v4i5.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AMDGPU/idot8s.ll
    llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
    llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
    llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
    llvm/test/CodeGen/X86/bitcast-vector-bool.ll
    llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
    llvm/test/CodeGen/X86/load-local-v3i1.ll
    llvm/test/CodeGen/X86/pr15267.ll
    llvm/test/CodeGen/X86/vector-sext.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index ef994b3f10ba..8f746ec45f6c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -702,131 +702,7 @@ void VectorLegalizer::PromoteFP_TO_INT(SDNode *Node,
 
 std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) {
   LoadSDNode *LD = cast<LoadSDNode>(N);
-
-  EVT SrcVT = LD->getMemoryVT();
-  EVT SrcEltVT = SrcVT.getScalarType();
-  unsigned NumElem = SrcVT.getVectorNumElements();
-
-  SDValue NewChain;
-  SDValue Value;
-  if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
-    SDLoc dl(N);
-
-    SmallVector<SDValue, 8> Vals;
-    SmallVector<SDValue, 8> LoadChains;
-
-    EVT DstEltVT = LD->getValueType(0).getScalarType();
-    SDValue Chain = LD->getChain();
-    SDValue BasePTR = LD->getBasePtr();
-    ISD::LoadExtType ExtType = LD->getExtensionType();
-
-    // When elements in a vector is not byte-addressable, we cannot directly
-    // load each element by advancing pointer, which could only address bytes.
-    // Instead, we load all significant words, mask bits off, and concatenate
-    // them to form each element. Finally, they are extended to destination
-    // scalar type to build the destination vector.
-    EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());
-
-    assert(WideVT.isRound() &&
-           "Could not handle the sophisticated case when the widest integer is"
-           " not power of 2.");
-    assert(WideVT.bitsGE(SrcEltVT) &&
-           "Type is not legalized?");
-
-    unsigned WideBytes = WideVT.getStoreSize();
-    unsigned Offset = 0;
-    unsigned RemainingBytes = SrcVT.getStoreSize();
-    SmallVector<SDValue, 8> LoadVals;
-    while (RemainingBytes > 0) {
-      SDValue ScalarLoad;
-      unsigned LoadBytes = WideBytes;
-
-      if (RemainingBytes >= LoadBytes) {
-        ScalarLoad = DAG.getLoad(
-            WideVT, dl, Chain, BasePTR,
-            LD->getPointerInfo().getWithOffset(Offset), LD->getOriginalAlign(),
-            LD->getMemOperand()->getFlags(), LD->getAAInfo());
-      } else {
-        EVT LoadVT = WideVT;
-        while (RemainingBytes < LoadBytes) {
-          LoadBytes >>= 1; // Reduce the load size by half.
-          LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3);
-        }
-        ScalarLoad =
-            DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR,
-                           LD->getPointerInfo().getWithOffset(Offset), LoadVT,
-                           LD->getOriginalAlign(),
-                           LD->getMemOperand()->getFlags(), LD->getAAInfo());
-      }
-
-      RemainingBytes -= LoadBytes;
-      Offset += LoadBytes;
-
-      BasePTR = DAG.getObjectPtrOffset(dl, BasePTR, LoadBytes);
-
-      LoadVals.push_back(ScalarLoad.getValue(0));
-      LoadChains.push_back(ScalarLoad.getValue(1));
-    }
-
-    unsigned BitOffset = 0;
-    unsigned WideIdx = 0;
-    unsigned WideBits = WideVT.getSizeInBits();
-
-    // Extract bits, pack and extend/trunc them into destination type.
-    unsigned SrcEltBits = SrcEltVT.getSizeInBits();
-    SDValue SrcEltBitMask = DAG.getConstant(
-        APInt::getLowBitsSet(WideBits, SrcEltBits), dl, WideVT);
-
-    for (unsigned Idx = 0; Idx != NumElem; ++Idx) {
-      assert(BitOffset < WideBits && "Unexpected offset!");
-
-      SDValue ShAmt = DAG.getConstant(
-          BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
-      SDValue Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);
-
-      BitOffset += SrcEltBits;
-      if (BitOffset >= WideBits) {
-        WideIdx++;
-        BitOffset -= WideBits;
-        if (BitOffset > 0) {
-          ShAmt = DAG.getConstant(
-              SrcEltBits - BitOffset, dl,
-              TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
-          SDValue Hi =
-              DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
-          Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi);
-        }
-      }
-
-      Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);
-
-      switch (ExtType) {
-      default: llvm_unreachable("Unknown extended-load op!");
-      case ISD::EXTLOAD:
-        Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT);
-        break;
-      case ISD::ZEXTLOAD:
-        Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
-        break;
-      case ISD::SEXTLOAD:
-        ShAmt =
-            DAG.getConstant(WideBits - SrcEltBits, dl,
-                            TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
-        Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
-        Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
-        Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
-        break;
-      }
-      Vals.push_back(Lo);
-    }
-
-    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
-    Value = DAG.getBuildVector(N->getValueType(0), dl, Vals);
-  } else {
-    std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
-  }
-
-  return std::make_pair(Value, NewChain);
+  return TLI.scalarizeVectorLoad(LD, DAG);
 }
 
 SDValue VectorLegalizer::ExpandStore(SDNode *N) {

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 9148ce35610f..1975f0dde30d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6620,27 +6620,40 @@ TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
   // elements that are byte-sized must therefore be stored as an integer
   // built out of the extracted vector elements.
   if (!SrcEltVT.isByteSized()) {
-    unsigned NumBits = SrcVT.getSizeInBits();
-    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
+    unsigned NumLoadBits = SrcVT.getStoreSizeInBits();
+    EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits);
+
+    unsigned NumSrcBits = SrcVT.getSizeInBits();
+    EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits);
 
-    SDValue Load = DAG.getLoad(IntVT, SL, Chain, BasePTR, LD->getPointerInfo(),
-                               LD->getAlignment(),
-                               LD->getMemOperand()->getFlags(),
-                               LD->getAAInfo());
+    unsigned SrcEltBits = SrcEltVT.getSizeInBits();
+    SDValue SrcEltBitMask = DAG.getConstant(
+        APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT);
+
+    // Load the whole vector and avoid masking off the top bits as it makes
+    // the codegen worse.
+    SDValue Load =
+        DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR,
+                       LD->getPointerInfo(), SrcIntVT, LD->getAlignment(),
+                       LD->getMemOperand()->getFlags(), LD->getAAInfo());
 
     SmallVector<SDValue, 8> Vals;
     for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
       unsigned ShiftIntoIdx =
           (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
       SDValue ShiftAmount =
-          DAG.getConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), SL, IntVT);
-      SDValue ShiftedElt =
-          DAG.getNode(ISD::SRL, SL, IntVT, Load, ShiftAmount);
-      SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, ShiftedElt);
+          DAG.getConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), SL,
+                          getShiftAmountTy(LoadVT, DAG.getDataLayout()));
+      SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount);
+      SDValue Elt =
+          DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask);
+      SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt);
+
       if (ExtType != ISD::NON_EXTLOAD) {
         unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType);
         Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar);
       }
+
       Vals.push_back(Scalar);
     }
 

diff --git a/llvm/test/CodeGen/AMDGPU/idot8s.ll b/llvm/test/CodeGen/AMDGPU/idot8s.ll
index a54b2f59e475..38b8bc37f5f2 100644
--- a/llvm/test/CodeGen/AMDGPU/idot8s.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot8s.ll
@@ -1248,58 +1248,43 @@ define amdgpu_kernel void @idot8_acc32_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    s_load_dword s5, s[4:5], 0x0
-; GFX7-NEXT:    s_load_dword s7, s[6:7], 0x0
-; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    s_ashr_i64 s[8:9], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s9, s5, 4
-; GFX7-NEXT:    s_ashr_i64 s[14:15], s[8:9], 60
-; GFX7-NEXT:    s_lshl_b32 s9, s5, 16
-; GFX7-NEXT:    s_ashr_i64 s[16:17], s[8:9], 60
-; GFX7-NEXT:    s_lshl_b32 s9, s5, 20
-; GFX7-NEXT:    s_lshl_b32 s11, s5, 8
-; GFX7-NEXT:    s_lshl_b32 s13, s5, 12
-; GFX7-NEXT:    s_ashr_i64 s[18:19], s[8:9], 60
-; GFX7-NEXT:    s_lshl_b32 s9, s5, 24
-; GFX7-NEXT:    s_lshl_b32 s5, s5, 28
-; GFX7-NEXT:    s_ashr_i64 s[4:5], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 4
-; GFX7-NEXT:    s_ashr_i64 s[24:25], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 8
-; GFX7-NEXT:    s_ashr_i64 s[26:27], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 12
-; GFX7-NEXT:    s_ashr_i64 s[28:29], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 16
-; GFX7-NEXT:    s_ashr_i64 s[30:31], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 20
-; GFX7-NEXT:    s_ashr_i64 s[34:35], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 24
-; GFX7-NEXT:    s_ashr_i64 s[36:37], s[4:5], 60
-; GFX7-NEXT:    s_lshl_b32 s5, s7, 28
-; GFX7-NEXT:    s_ashr_i64 s[22:23], s[6:7], 60
-; GFX7-NEXT:    s_ashr_i64 s[6:7], s[4:5], 60
-; GFX7-NEXT:    s_load_dword s5, s[0:1], 0x0
-; GFX7-NEXT:    v_mov_b32_e32 v0, s6
-; GFX7-NEXT:    s_ashr_i64 s[20:21], s[8:9], 60
-; GFX7-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX7-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
+; GFX7-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT:    s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT:    s_load_dword s20, s[0:1], 0x0
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-NEXT:    s_ashr_i32 s6, s4, 28
+; GFX7-NEXT:    s_ashr_i32 s13, s5, 28
+; GFX7-NEXT:    s_bfe_i32 s14, s5, 0x40018
+; GFX7-NEXT:    s_bfe_i32 s15, s5, 0x40014
+; GFX7-NEXT:    s_bfe_i32 s16, s5, 0x40010
+; GFX7-NEXT:    s_bfe_i32 s17, s5, 0x4000c
+; GFX7-NEXT:    s_bfe_i32 s18, s5, 0x40008
+; GFX7-NEXT:    s_bfe_i32 s19, s5, 0x40004
+; GFX7-NEXT:    s_bfe_i32 s5, s5, 0x40000
+; GFX7-NEXT:    s_bfe_i32 s7, s4, 0x40018
+; GFX7-NEXT:    s_bfe_i32 s8, s4, 0x40014
+; GFX7-NEXT:    s_bfe_i32 s9, s4, 0x40010
+; GFX7-NEXT:    s_bfe_i32 s10, s4, 0x4000c
+; GFX7-NEXT:    s_bfe_i32 s11, s4, 0x40008
+; GFX7-NEXT:    s_bfe_i32 s12, s4, 0x40004
+; GFX7-NEXT:    s_bfe_i32 s4, s4, 0x40000
+; GFX7-NEXT:    v_mov_b32_e32 v0, s5
+; GFX7-NEXT:    v_mov_b32_e32 v1, s20
 ; GFX7-NEXT:    v_mad_i32_i24 v0, s4, v0, v1
-; GFX7-NEXT:    v_mov_b32_e32 v1, s36
-; GFX7-NEXT:    v_mad_i32_i24 v0, s20, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s34
-; GFX7-NEXT:    v_mad_i32_i24 v0, s18, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s30
-; GFX7-NEXT:    v_mad_i32_i24 v0, s16, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s28
+; GFX7-NEXT:    v_mov_b32_e32 v1, s19
 ; GFX7-NEXT:    v_mad_i32_i24 v0, s12, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s26
+; GFX7-NEXT:    v_mov_b32_e32 v1, s18
+; GFX7-NEXT:    v_mad_i32_i24 v0, s11, v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s17
 ; GFX7-NEXT:    v_mad_i32_i24 v0, s10, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s24
-; GFX7-NEXT:    v_mad_i32_i24 v0, s14, v1, v0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s22
+; GFX7-NEXT:    v_mov_b32_e32 v1, s16
+; GFX7-NEXT:    v_mad_i32_i24 v0, s9, v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s15
 ; GFX7-NEXT:    v_mad_i32_i24 v0, s8, v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s14
+; GFX7-NEXT:    v_mad_i32_i24 v0, s7, v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s13
+; GFX7-NEXT:    v_mad_i32_i24 v0, s6, v1, v0
 ; GFX7-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX7-NEXT:    s_endpgm
 ;
@@ -1308,58 +1293,43 @@ define amdgpu_kernel void @idot8_acc32_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX8-NEXT:    s_load_dword s5, s[6:7], 0x0
-; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_ashr_i64 s[6:7], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s7, s3, 4
-; GFX8-NEXT:    s_ashr_i64 s[14:15], s[6:7], 60
-; GFX8-NEXT:    s_lshl_b32 s7, s3, 20
-; GFX8-NEXT:    s_lshl_b32 s9, s3, 8
-; GFX8-NEXT:    s_lshl_b32 s11, s3, 12
-; GFX8-NEXT:    s_lshl_b32 s13, s3, 16
-; GFX8-NEXT:    s_ashr_i64 s[16:17], s[6:7], 60
-; GFX8-NEXT:    s_lshl_b32 s7, s3, 24
-; GFX8-NEXT:    s_lshl_b32 s3, s3, 28
-; GFX8-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 4
-; GFX8-NEXT:    s_ashr_i64 s[22:23], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 8
-; GFX8-NEXT:    s_ashr_i64 s[24:25], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 12
-; GFX8-NEXT:    s_ashr_i64 s[26:27], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 16
-; GFX8-NEXT:    s_ashr_i64 s[28:29], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 20
-; GFX8-NEXT:    s_ashr_i64 s[30:31], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 24
-; GFX8-NEXT:    s_ashr_i64 s[34:35], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s3, s5, 28
-; GFX8-NEXT:    s_ashr_i64 s[20:21], s[4:5], 60
-; GFX8-NEXT:    s_ashr_i64 s[4:5], s[2:3], 60
-; GFX8-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    s_ashr_i64 s[18:19], s[6:7], 60
-; GFX8-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX8-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
+; GFX8-NEXT:    s_load_dword s2, s[4:5], 0x0
+; GFX8-NEXT:    s_load_dword s3, s[6:7], 0x0
+; GFX8-NEXT:    s_load_dword s18, s[0:1], 0x0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8-NEXT:    s_ashr_i32 s4, s2, 28
+; GFX8-NEXT:    s_ashr_i32 s11, s3, 28
+; GFX8-NEXT:    s_bfe_i32 s12, s3, 0x40018
+; GFX8-NEXT:    s_bfe_i32 s13, s3, 0x40014
+; GFX8-NEXT:    s_bfe_i32 s14, s3, 0x40010
+; GFX8-NEXT:    s_bfe_i32 s15, s3, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s16, s3, 0x40008
+; GFX8-NEXT:    s_bfe_i32 s17, s3, 0x40004
+; GFX8-NEXT:    s_bfe_i32 s3, s3, 0x40000
+; GFX8-NEXT:    s_bfe_i32 s5, s2, 0x40018
+; GFX8-NEXT:    s_bfe_i32 s6, s2, 0x40014
+; GFX8-NEXT:    s_bfe_i32 s7, s2, 0x40010
+; GFX8-NEXT:    s_bfe_i32 s8, s2, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s9, s2, 0x40008
+; GFX8-NEXT:    s_bfe_i32 s10, s2, 0x40004
+; GFX8-NEXT:    s_bfe_i32 s2, s2, 0x40000
+; GFX8-NEXT:    v_mov_b32_e32 v0, s3
+; GFX8-NEXT:    v_mov_b32_e32 v1, s18
 ; GFX8-NEXT:    v_mad_i32_i24 v0, s2, v0, v1
-; GFX8-NEXT:    v_mov_b32_e32 v1, s34
-; GFX8-NEXT:    v_mad_i32_i24 v0, s18, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s30
-; GFX8-NEXT:    v_mad_i32_i24 v0, s16, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s28
-; GFX8-NEXT:    v_mad_i32_i24 v0, s12, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s26
+; GFX8-NEXT:    v_mov_b32_e32 v1, s17
 ; GFX8-NEXT:    v_mad_i32_i24 v0, s10, v1, v0
-; GFX8-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX8-NEXT:    v_mov_b32_e32 v1, s24
+; GFX8-NEXT:    v_mov_b32_e32 v1, s16
+; GFX8-NEXT:    v_mad_i32_i24 v0, s9, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s15
 ; GFX8-NEXT:    v_mad_i32_i24 v0, s8, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s22
-; GFX8-NEXT:    v_mad_i32_i24 v0, s14, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s20
-; GFX8-NEXT:    v_mad_i32_i24 v2, s6, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s14
+; GFX8-NEXT:    v_mad_i32_i24 v0, s7, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s13
+; GFX8-NEXT:    v_mad_i32_i24 v0, s6, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s12
+; GFX8-NEXT:    v_mad_i32_i24 v0, s5, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s11
+; GFX8-NEXT:    v_mad_i32_i24 v2, s4, v1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
@@ -1370,58 +1340,43 @@ define amdgpu_kernel void @idot8_acc32_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-NEXT:    s_load_dword s5, s[6:7], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_ashr_i64 s[6:7], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s7, s3, 4
-; GFX9-NEXT:    s_ashr_i64 s[14:15], s[6:7], 60
-; GFX9-NEXT:    s_lshl_b32 s7, s3, 20
-; GFX9-NEXT:    s_lshl_b32 s9, s3, 8
-; GFX9-NEXT:    s_lshl_b32 s11, s3, 12
-; GFX9-NEXT:    s_lshl_b32 s13, s3, 16
-; GFX9-NEXT:    s_ashr_i64 s[16:17], s[6:7], 60
-; GFX9-NEXT:    s_lshl_b32 s7, s3, 24
-; GFX9-NEXT:    s_lshl_b32 s3, s3, 28
-; GFX9-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 4
-; GFX9-NEXT:    s_ashr_i64 s[22:23], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 8
-; GFX9-NEXT:    s_ashr_i64 s[24:25], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 12
-; GFX9-NEXT:    s_ashr_i64 s[26:27], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 16
-; GFX9-NEXT:    s_ashr_i64 s[28:29], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 20
-; GFX9-NEXT:    s_ashr_i64 s[30:31], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 24
-; GFX9-NEXT:    s_ashr_i64 s[34:35], s[2:3], 60
-; GFX9-NEXT:    s_lshl_b32 s3, s5, 28
-; GFX9-NEXT:    s_ashr_i64 s[20:21], s[4:5], 60
-; GFX9-NEXT:    s_ashr_i64 s[4:5], s[2:3], 60
-; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:    s_ashr_i64 s[18:19], s[6:7], 60
-; GFX9-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX9-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
+; GFX9-NEXT:    s_load_dword s2, s[4:5], 0x0
+; GFX9-NEXT:    s_load_dword s3, s[6:7], 0x0
+; GFX9-NEXT:    s_load_dword s18, s[0:1], 0x0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    s_ashr_i32 s4, s2, 28
+; GFX9-NEXT:    s_ashr_i32 s11, s3, 28
+; GFX9-NEXT:    s_bfe_i32 s12, s3, 0x40018
+; GFX9-NEXT:    s_bfe_i32 s13, s3, 0x40014
+; GFX9-NEXT:    s_bfe_i32 s14, s3, 0x40010
+; GFX9-NEXT:    s_bfe_i32 s15, s3, 0x4000c
+; GFX9-NEXT:    s_bfe_i32 s16, s3, 0x40008
+; GFX9-NEXT:    s_bfe_i32 s17, s3, 0x40004
+; GFX9-NEXT:    s_bfe_i32 s3, s3, 0x40000
+; GFX9-NEXT:    s_bfe_i32 s5, s2, 0x40018
+; GFX9-NEXT:    s_bfe_i32 s6, s2, 0x40014
+; GFX9-NEXT:    s_bfe_i32 s7, s2, 0x40010
+; GFX9-NEXT:    s_bfe_i32 s8, s2, 0x4000c
+; GFX9-NEXT:    s_bfe_i32 s9, s2, 0x40008
+; GFX9-NEXT:    s_bfe_i32 s10, s2, 0x40004
+; GFX9-NEXT:    s_bfe_i32 s2, s2, 0x40000
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s18
 ; GFX9-NEXT:    v_mad_i32_i24 v0, s2, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s34
-; GFX9-NEXT:    v_mad_i32_i24 v0, s18, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s30
-; GFX9-NEXT:    v_mad_i32_i24 v0, s16, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s28
-; GFX9-NEXT:    v_mad_i32_i24 v0, s12, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s26
+; GFX9-NEXT:    v_mov_b32_e32 v1, s17
 ; GFX9-NEXT:    v_mad_i32_i24 v0, s10, v1, v0
-; GFX9-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX9-NEXT:    v_mov_b32_e32 v1, s24
+; GFX9-NEXT:    v_mov_b32_e32 v1, s16
+; GFX9-NEXT:    v_mad_i32_i24 v0, s9, v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s15
 ; GFX9-NEXT:    v_mad_i32_i24 v0, s8, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s22
-; GFX9-NEXT:    v_mad_i32_i24 v0, s14, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s20
-; GFX9-NEXT:    v_mad_i32_i24 v2, s6, v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s14
+; GFX9-NEXT:    v_mad_i32_i24 v0, s7, v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s13
+; GFX9-NEXT:    v_mad_i32_i24 v0, s6, v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s12
+; GFX9-NEXT:    v_mad_i32_i24 v0, s5, v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s11
+; GFX9-NEXT:    v_mad_i32_i24 v2, s4, v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
@@ -1432,58 +1387,13 @@ define amdgpu_kernel void @idot8_acc32_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX9-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX9-DL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-DL-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-DL-NEXT:    s_load_dword s5, s[6:7], 0x0
-; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-DL-NEXT:    s_ashr_i64 s[6:7], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s7, s3, 4
-; GFX9-DL-NEXT:    s_ashr_i64 s[14:15], s[6:7], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s7, s3, 20
-; GFX9-DL-NEXT:    s_lshl_b32 s9, s3, 8
-; GFX9-DL-NEXT:    s_lshl_b32 s11, s3, 12
-; GFX9-DL-NEXT:    s_lshl_b32 s13, s3, 16
-; GFX9-DL-NEXT:    s_ashr_i64 s[16:17], s[6:7], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s7, s3, 24
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s3, 28
-; GFX9-DL-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 4
-; GFX9-DL-NEXT:    s_ashr_i64 s[22:23], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 8
-; GFX9-DL-NEXT:    s_ashr_i64 s[24:25], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 12
-; GFX9-DL-NEXT:    s_ashr_i64 s[26:27], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 16
-; GFX9-DL-NEXT:    s_ashr_i64 s[28:29], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 20
-; GFX9-DL-NEXT:    s_ashr_i64 s[30:31], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 24
-; GFX9-DL-NEXT:    s_ashr_i64 s[34:35], s[2:3], 60
-; GFX9-DL-NEXT:    s_lshl_b32 s3, s5, 28
-; GFX9-DL-NEXT:    s_ashr_i64 s[20:21], s[4:5], 60
-; GFX9-DL-NEXT:    s_ashr_i64 s[4:5], s[2:3], 60
+; GFX9-DL-NEXT:    s_load_dword s2, s[6:7], 0x0
 ; GFX9-DL-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-DL-NEXT:    s_ashr_i64 s[18:19], s[6:7], 60
-; GFX9-DL-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX9-DL-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
+; GFX9-DL-NEXT:    s_load_dword s4, s[4:5], 0x0
 ; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s2, v0, v1
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s34
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s18, v1, v0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s30
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s16, v1, v0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s28
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s12, v1, v0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s26
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s10, v1, v0
-; GFX9-DL-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s24
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s8, v1, v0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s22
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, s14, v1, v0
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s20
-; GFX9-DL-NEXT:    v_mad_i32_i24 v2, s6, v1, v0
+; GFX9-DL-NEXT:    v_dot8_i32_i4 v2, s4, v0, v1
 ; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-DL-NEXT:    global_store_dword v[0:1], v2, off
@@ -1491,55 +1401,18 @@ define amdgpu_kernel void @idot8_acc32_vecMul(<8 x i4> addrspace(1)* %src1,
 ;
 ; GFX10-DL-LABEL: idot8_acc32_vecMul:
 ; GFX10-DL:       ; %bb.0: ; %entry
-; GFX10-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX10-DL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-DL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GFX10-DL-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX10-DL-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-DL-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX10-DL-NEXT:    s_load_dword s5, s[6:7], 0x0
-; GFX10-DL-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX10-DL-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX10-DL-NEXT:    s_load_dword s0, s[0:1], 0x0
+; GFX10-DL-NEXT:    s_load_dword s1, s[2:3], 0x0
 ; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-DL-NEXT:    s_lshl_b32 s7, s3, 28
-; GFX10-DL-NEXT:    s_lshl_b32 s9, s5, 28
-; GFX10-DL-NEXT:    s_lshl_b32 s11, s3, 24
-; GFX10-DL-NEXT:    s_lshl_b32 s13, s5, 24
-; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX10-DL-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX10-DL-NEXT:    s_lshl_b32 s7, s3, 20
-; GFX10-DL-NEXT:    s_lshl_b32 s9, s5, 20
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s6, s8, v0
-; GFX10-DL-NEXT:    s_lshl_b32 s11, s3, 16
-; GFX10-DL-NEXT:    s_lshl_b32 s13, s5, 16
-; GFX10-DL-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s10, s12, v0
-; GFX10-DL-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX10-DL-NEXT:    s_lshl_b32 s7, s3, 12
-; GFX10-DL-NEXT:    s_lshl_b32 s9, s5, 12
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s6, s8, v0
-; GFX10-DL-NEXT:    s_lshl_b32 s11, s3, 8
-; GFX10-DL-NEXT:    s_lshl_b32 s13, s5, 8
-; GFX10-DL-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s10, s12, v0
-; GFX10-DL-NEXT:    s_lshl_b32 s7, s3, 4
-; GFX10-DL-NEXT:    s_lshl_b32 s9, s5, 4
-; GFX10-DL-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s6, s8, v0
-; GFX10-DL-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX10-DL-NEXT:    s_ashr_i64 s[4:5], s[4:5], 60
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s10, s12, v0
-; GFX10-DL-NEXT:    v_mad_i32_i24 v0, s6, s8, v0
-; GFX10-DL-NEXT:    v_mad_i32_i24 v2, s2, s4, v0
-; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX10-DL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX10-DL-NEXT:    v_dot8_i32_i4 v2, s0, s1, v0
+; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX10-DL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX10-DL-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX10-DL-NEXT:    s_endpgm
                                               <8 x i4> addrspace(1)* %src2,
@@ -1642,60 +1515,46 @@ define amdgpu_kernel void @idot8_acc16_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_load_dword s3, s[6:7], 0x0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_load_ushort v2, v[0:1]
-; GFX8-NEXT:    s_load_dword s1, s[4:5], 0x0
+; GFX8-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GFX8-NEXT:    s_load_dword s1, s[6:7], 0x0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_lshl_b32 s27, s3, 28
-; GFX8-NEXT:    s_ashr_i64 s[16:17], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s19, s3, 8
-; GFX8-NEXT:    s_lshl_b32 s21, s3, 12
-; GFX8-NEXT:    s_lshl_b32 s15, s1, 28
-; GFX8-NEXT:    s_lshl_b32 s23, s3, 16
-; GFX8-NEXT:    s_lshl_b32 s25, s3, 24
-; GFX8-NEXT:    s_lshl_b32 s17, s3, 4
-; GFX8-NEXT:    s_lshl_b32 s3, s3, 20
-; GFX8-NEXT:    s_ashr_i64 s[4:5], s[0:1], 60
-; GFX8-NEXT:    s_ashr_i64 s[26:27], s[26:27], 60
-; GFX8-NEXT:    s_lshl_b32 s7, s1, 8
-; GFX8-NEXT:    s_lshl_b32 s9, s1, 12
-; GFX8-NEXT:    s_lshl_b32 s11, s1, 16
-; GFX8-NEXT:    s_lshl_b32 s13, s1, 24
-; GFX8-NEXT:    s_lshl_b32 s5, s1, 4
-; GFX8-NEXT:    s_lshl_b32 s1, s1, 20
-; GFX8-NEXT:    s_ashr_i64 s[24:25], s[24:25], 60
-; GFX8-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX8-NEXT:    s_ashr_i64 s[14:15], s[14:15], 60
-; GFX8-NEXT:    v_mov_b32_e32 v4, s26
-; GFX8-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX8-NEXT:    s_ashr_i64 s[0:1], s[0:1], 60
-; GFX8-NEXT:    v_mov_b32_e32 v3, s2
-; GFX8-NEXT:    v_mov_b32_e32 v5, s24
-; GFX8-NEXT:    s_ashr_i64 s[22:23], s[22:23], 60
+; GFX8-NEXT:    s_bfe_i32 s8, s0, 0x40000
+; GFX8-NEXT:    s_bfe_i32 s15, s1, 0x40000
+; GFX8-NEXT:    s_bfe_i32 s10, s1, 0x40018
+; GFX8-NEXT:    s_bfe_i32 s11, s1, 0x40014
+; GFX8-NEXT:    s_bfe_i32 s12, s1, 0x40010
+; GFX8-NEXT:    s_bfe_i32 s13, s1, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s14, s1, 0x40004
+; GFX8-NEXT:    s_ashr_i32 s9, s1, 28
+; GFX8-NEXT:    s_bfe_i32 s1, s1, 0x40008
+; GFX8-NEXT:    v_mov_b32_e32 v4, s15
+; GFX8-NEXT:    s_ashr_i32 s2, s0, 28
+; GFX8-NEXT:    s_bfe_i32 s3, s0, 0x40018
+; GFX8-NEXT:    s_bfe_i32 s4, s0, 0x40014
+; GFX8-NEXT:    s_bfe_i32 s5, s0, 0x40010
+; GFX8-NEXT:    s_bfe_i32 s6, s0, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s7, s0, 0x40004
+; GFX8-NEXT:    s_bfe_i32 s0, s0, 0x40008
+; GFX8-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NEXT:    v_mov_b32_e32 v5, s14
 ; GFX8-NEXT:    v_mul_i32_i24_e32 v3, s0, v3
-; GFX8-NEXT:    s_ashr_i64 s[20:21], s[20:21], 60
-; GFX8-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
-; GFX8-NEXT:    v_mov_b32_e32 v6, s22
-; GFX8-NEXT:    s_ashr_i64 s[18:19], s[18:19], 60
-; GFX8-NEXT:    s_ashr_i64 s[8:9], s[8:9], 60
-; GFX8-NEXT:    v_mov_b32_e32 v7, s20
-; GFX8-NEXT:    s_ashr_i64 s[30:31], s[16:17], 60
-; GFX8-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX8-NEXT:    v_mov_b32_e32 v8, s18
-; GFX8-NEXT:    s_ashr_i64 s[28:29], s[4:5], 60
-; GFX8-NEXT:    v_mov_b32_e32 v9, s30
+; GFX8-NEXT:    v_mov_b32_e32 v6, s13
+; GFX8-NEXT:    v_mov_b32_e32 v7, s12
+; GFX8-NEXT:    v_mov_b32_e32 v8, s11
+; GFX8-NEXT:    v_mov_b32_e32 v9, s10
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_mad_i32_i24 v2, s14, v4, v2
-; GFX8-NEXT:    v_mad_i32_i24 v2, s12, v5, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s8, v4, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s7, v5, v2
 ; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
-; GFX8-NEXT:    v_mad_i32_i24 v2, s10, v6, v2
-; GFX8-NEXT:    v_mad_i32_i24 v2, s8, v7, v2
-; GFX8-NEXT:    v_mad_i32_i24 v2, s6, v8, v2
-; GFX8-NEXT:    v_mad_i32_i24 v2, s28, v9, v2
-; GFX8-NEXT:    v_mov_b32_e32 v3, s16
-; GFX8-NEXT:    v_mad_i32_i24 v2, s4, v3, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s6, v6, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s5, v7, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s4, v8, v2
+; GFX8-NEXT:    v_mad_i32_i24 v2, s3, v9, v2
+; GFX8-NEXT:    v_mov_b32_e32 v3, s9
+; GFX8-NEXT:    v_mad_i32_i24 v2, s2, v3, v2
 ; GFX8-NEXT:    flat_store_short v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
 ;
@@ -2021,83 +1880,69 @@ define amdgpu_kernel void @idot8_acc8_vecMul(<8 x i4> addrspace(1)* %src1,
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GFX8-NEXT:    s_mov_b32 s33, 0xffff
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_load_ubyte v2, v[0:1]
 ; GFX8-NEXT:    s_load_dword s1, s[4:5], 0x0
-; GFX8-NEXT:    s_load_dword s3, s[6:7], 0x0
+; GFX8-NEXT:    s_load_dword s2, s[6:7], 0x0
+; GFX8-NEXT:    s_mov_b32 s0, 0xffff
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_lshl_b32 s11, s1, 24
-; GFX8-NEXT:    s_lshl_b32 s15, s1, 16
-; GFX8-NEXT:    s_ashr_i64 s[20:21], s[2:3], 60
-; GFX8-NEXT:    s_lshl_b32 s23, s3, 24
-; GFX8-NEXT:    s_lshl_b32 s25, s3, 28
-; GFX8-NEXT:    s_lshl_b32 s27, s3, 16
-; GFX8-NEXT:    s_ashr_i64 s[8:9], s[0:1], 60
-; GFX8-NEXT:    s_lshl_b32 s13, s1, 28
-; GFX8-NEXT:    s_lshl_b32 s17, s3, 8
-; GFX8-NEXT:    s_lshl_b32 s19, s3, 12
-; GFX8-NEXT:    s_lshl_b32 s21, s3, 4
-; GFX8-NEXT:    s_lshl_b32 s3, s3, 20
-; GFX8-NEXT:    s_ashr_i64 s[10:11], s[10:11], 60
-; GFX8-NEXT:    s_ashr_i64 s[14:15], s[14:15], 60
-; GFX8-NEXT:    s_ashr_i64 s[22:23], s[22:23], 60
-; GFX8-NEXT:    s_ashr_i64 s[24:25], s[24:25], 60
-; GFX8-NEXT:    s_ashr_i64 s[26:27], s[26:27], 60
-; GFX8-NEXT:    s_lshl_b32 s5, s1, 8
-; GFX8-NEXT:    s_lshl_b32 s7, s1, 12
-; GFX8-NEXT:    s_lshl_b32 s9, s1, 4
-; GFX8-NEXT:    s_lshl_b32 s1, s1, 20
-; GFX8-NEXT:    s_ashr_i64 s[2:3], s[2:3], 60
-; GFX8-NEXT:    s_ashr_i64 s[12:13], s[12:13], 60
-; GFX8-NEXT:    v_mov_b32_e32 v6, s26
+; GFX8-NEXT:    s_bfe_i32 s7, s1, 0x40004
+; GFX8-NEXT:    s_bfe_i32 s9, s1, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s14, s2, 0x40004
+; GFX8-NEXT:    s_bfe_i32 s15, s2, 0x40000
+; GFX8-NEXT:    s_bfe_i32 s16, s2, 0x4000c
+; GFX8-NEXT:    s_bfe_i32 s3, s1, 0x40014
+; GFX8-NEXT:    s_ashr_i32 s5, s1, 28
+; GFX8-NEXT:    s_bfe_i32 s10, s2, 0x40014
+; GFX8-NEXT:    s_bfe_i32 s11, s2, 0x40010
+; GFX8-NEXT:    s_ashr_i32 s12, s2, 28
+; GFX8-NEXT:    s_bfe_i32 s13, s2, 0x40018
+; GFX8-NEXT:    s_bfe_i32 s2, s2, 0x40008
+; GFX8-NEXT:    s_bfe_i32 s8, s1, 0x40000
+; GFX8-NEXT:    v_mov_b32_e32 v4, s16
+; GFX8-NEXT:    v_mov_b32_e32 v5, s9
+; GFX8-NEXT:    v_mov_b32_e32 v6, s15
 ; GFX8-NEXT:    v_mov_b32_e32 v7, s14
-; GFX8-NEXT:    v_mov_b32_e32 v8, s24
-; GFX8-NEXT:    v_mov_b32_e32 v9, s22
-; GFX8-NEXT:    v_mov_b32_e32 v10, s10
-; GFX8-NEXT:    v_mul_i32_i24_sdwa v6, v7, v6 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT:    v_mul_i32_i24_e32 v7, s12, v8
-; GFX8-NEXT:    v_mul_i32_i24_sdwa v8, v10, v9 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT:    s_ashr_i64 s[0:1], s[0:1], 60
-; GFX8-NEXT:    v_mov_b32_e32 v5, s2
-; GFX8-NEXT:    v_mul_i32_i24_e32 v5, s0, v5
-; GFX8-NEXT:    v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX8-NEXT:    s_ashr_i64 s[4:5], s[4:5], 60
-; GFX8-NEXT:    s_ashr_i64 s[16:17], s[16:17], 60
-; GFX8-NEXT:    v_or_b32_sdwa v5, v5, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX8-NEXT:    v_and_b32_e32 v6, s33, v7
-; GFX8-NEXT:    s_ashr_i64 s[18:19], s[18:19], 60
-; GFX8-NEXT:    v_mov_b32_e32 v3, s20
-; GFX8-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NEXT:    s_ashr_i64 s[30:31], s[20:21], 60
-; GFX8-NEXT:    v_mul_i32_i24_sdwa v3, v4, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_e32 v5, v6, v5
-; GFX8-NEXT:    s_ashr_i64 s[6:7], s[6:7], 60
-; GFX8-NEXT:    v_mov_b32_e32 v4, s18
-; GFX8-NEXT:    v_mov_b32_e32 v12, s16
-; GFX8-NEXT:    v_mov_b32_e32 v13, s4
-; GFX8-NEXT:    s_ashr_i64 s[28:29], s[8:9], 60
-; GFX8-NEXT:    v_mov_b32_e32 v11, s30
-; GFX8-NEXT:    v_mul_i32_i24_e32 v4, s6, v4
-; GFX8-NEXT:    v_mul_i32_i24_sdwa v10, v13, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 8, v5
-; GFX8-NEXT:    v_or_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX8-NEXT:    v_mul_i32_i24_e32 v9, s28, v11
-; GFX8-NEXT:    v_or_b32_sdwa v3, v9, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX8-NEXT:    v_and_b32_e32 v4, s33, v4
-; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 8, v3
+; GFX8-NEXT:    v_mov_b32_e32 v8, s7
+; GFX8-NEXT:    v_mul_i32_i24_sdwa v4, v5, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_mul_i32_i24_e32 v5, s8, v6
+; GFX8-NEXT:    v_mul_i32_i24_sdwa v6, v8, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    s_bfe_i32 s4, s1, 0x40010
+; GFX8-NEXT:    s_bfe_i32 s6, s1, 0x40018
+; GFX8-NEXT:    v_mov_b32_e32 v9, s13
+; GFX8-NEXT:    s_bfe_i32 s1, s1, 0x40008
+; GFX8-NEXT:    v_mov_b32_e32 v3, s2
+; GFX8-NEXT:    v_mov_b32_e32 v10, s12
+; GFX8-NEXT:    v_mov_b32_e32 v11, s5
+; GFX8-NEXT:    v_mov_b32_e32 v12, s11
+; GFX8-NEXT:    v_mov_b32_e32 v13, s10
+; GFX8-NEXT:    v_mov_b32_e32 v14, s3
+; GFX8-NEXT:    v_mul_i32_i24_e32 v3, s1, v3
+; GFX8-NEXT:    v_or_b32_sdwa v5, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_i32_i24_e32 v7, s6, v9
+; GFX8-NEXT:    v_mul_i32_i24_sdwa v8, v11, v10 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_mul_i32_i24_e32 v9, s4, v12
+; GFX8-NEXT:    v_mul_i32_i24_sdwa v10, v14, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_and_b32_e32 v5, s0, v5
+; GFX8-NEXT:    v_or_b32_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v9, v9, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v7, v7, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_and_b32_e32 v4, s0, v9
+; GFX8-NEXT:    v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT:    v_or_b32_e32 v6, v4, v7
+; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 8, v3
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 8, v6
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v5
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v7, v2
-; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_0
-; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_0
+; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v4
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v8, v2
-; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_add_u32_sdwa v2, vcc, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
 ; GFX8-NEXT:    flat_store_byte v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
index 955f48184c3c..635f31a0ce3d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
@@ -179,13 +179,15 @@ define arm_aapcs_vfpcc <2 x i64> @bitcast_to_v2i1(i2 %b, <2 x i64> %a) {
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
 ; CHECK-LE-NEXT:    sub sp, #4
-; CHECK-LE-NEXT:    and r0, r0, #3
-; CHECK-LE-NEXT:    sbfx r1, r0, #0, #1
-; CHECK-LE-NEXT:    sbfx r0, r0, #1, #1
-; CHECK-LE-NEXT:    vmov.32 q1[0], r1
-; CHECK-LE-NEXT:    vmov.32 q1[1], r1
-; CHECK-LE-NEXT:    vmov.32 q1[2], r0
-; CHECK-LE-NEXT:    vmov.32 q1[3], r0
+; CHECK-LE-NEXT:    and r1, r0, #2
+; CHECK-LE-NEXT:    and r0, r0, #1
+; CHECK-LE-NEXT:    rsbs r0, r0, #0
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    vmov.32 q1[0], r0
+; CHECK-LE-NEXT:    sub.w r1, r2, r1, lsr #1
+; CHECK-LE-NEXT:    vmov.32 q1[1], r0
+; CHECK-LE-NEXT:    vmov.32 q1[2], r1
+; CHECK-LE-NEXT:    vmov.32 q1[3], r1
 ; CHECK-LE-NEXT:    vand q0, q0, q1
 ; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    bx lr
@@ -194,9 +196,11 @@ define arm_aapcs_vfpcc <2 x i64> @bitcast_to_v2i1(i2 %b, <2 x i64> %a) {
 ; CHECK-BE:       @ %bb.0: @ %entry
 ; CHECK-BE-NEXT:    .pad #4
 ; CHECK-BE-NEXT:    sub sp, #4
-; CHECK-BE-NEXT:    and r0, r0, #3
-; CHECK-BE-NEXT:    sbfx r1, r0, #0, #1
-; CHECK-BE-NEXT:    sbfx r0, r0, #1, #1
+; CHECK-BE-NEXT:    and r1, r0, #2
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    and r0, r0, #1
+; CHECK-BE-NEXT:    sub.w r1, r2, r1, lsr #1
+; CHECK-BE-NEXT:    rsbs r0, r0, #0
 ; CHECK-BE-NEXT:    vmov.32 q1[0], r1
 ; CHECK-BE-NEXT:    vmov.32 q1[1], r1
 ; CHECK-BE-NEXT:    vmov.32 q1[2], r0

diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll b/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
index 0dbbe3f663c3..e299cf589156 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
@@ -144,9 +144,11 @@ define arm_aapcs_vfpcc <2 x i64> @load_v2i1(<2 x i1> *%src, <2 x i64> %a) {
 ; CHECK-LE-LABEL: load_v2i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    ldrb r0, [r0]
-; CHECK-LE-NEXT:    sbfx r1, r0, #0, #1
-; CHECK-LE-NEXT:    sbfx r0, r0, #1, #1
+; CHECK-LE-NEXT:    and r1, r0, #1
+; CHECK-LE-NEXT:    rsbs r1, r1, #0
+; CHECK-LE-NEXT:    ubfx r0, r0, #1, #1
 ; CHECK-LE-NEXT:    vmov.32 q1[0], r1
+; CHECK-LE-NEXT:    rsbs r0, r0, #0
 ; CHECK-LE-NEXT:    vmov.32 q1[1], r1
 ; CHECK-LE-NEXT:    vmov.32 q1[2], r0
 ; CHECK-LE-NEXT:    vmov.32 q1[3], r0
@@ -156,8 +158,10 @@ define arm_aapcs_vfpcc <2 x i64> @load_v2i1(<2 x i1> *%src, <2 x i64> %a) {
 ; CHECK-BE-LABEL: load_v2i1:
 ; CHECK-BE:       @ %bb.0: @ %entry
 ; CHECK-BE-NEXT:    ldrb r0, [r0]
-; CHECK-BE-NEXT:    sbfx r1, r0, #0, #1
-; CHECK-BE-NEXT:    sbfx r0, r0, #1, #1
+; CHECK-BE-NEXT:    ubfx r1, r0, #1, #1
+; CHECK-BE-NEXT:    and r0, r0, #1
+; CHECK-BE-NEXT:    rsbs r1, r1, #0
+; CHECK-BE-NEXT:    rsbs r0, r0, #0
 ; CHECK-BE-NEXT:    vmov.32 q1[0], r1
 ; CHECK-BE-NEXT:    vmov.32 q1[1], r1
 ; CHECK-BE-NEXT:    vmov.32 q1[2], r0

diff --git a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
index b30a81932d89..7b7ddf72123b 100644
--- a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
@@ -611,11 +611,12 @@ define void @load_v2i1_broadcast_1_v1i1_store(<2 x i1>* %a0,<1 x i1>* %a1) {
 define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
 ; AVX512-LABEL: load_v3i1_broadcast_1_v1i1_store:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movzbl (%rdi), %eax
+; AVX512-NEXT:    movb (%rdi), %al
+; AVX512-NEXT:    shrb %al
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    btl $1, %eax
+; AVX512-NEXT:    testb $1, %al
 ; AVX512-NEXT:    movl $255, %eax
-; AVX512-NEXT:    cmovael %ecx, %eax
+; AVX512-NEXT:    cmovel %ecx, %eax
 ; AVX512-NEXT:    kmovd %eax, %k0
 ; AVX512-NEXT:    kshiftrb $1, %k0, %k0
 ; AVX512-NEXT:    kmovb %k0, (%rsi)
@@ -623,11 +624,12 @@ define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v3i1_broadcast_1_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    movb (%rdi), %al
+; AVX512NOTDQ-NEXT:    shrb %al
 ; AVX512NOTDQ-NEXT:    xorl %ecx, %ecx
-; AVX512NOTDQ-NEXT:    btl $1, %eax
+; AVX512NOTDQ-NEXT:    testb $1, %al
 ; AVX512NOTDQ-NEXT:    movl $255, %eax
-; AVX512NOTDQ-NEXT:    cmovael %ecx, %eax
+; AVX512NOTDQ-NEXT:    cmovel %ecx, %eax
 ; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $1, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kmovd %k0, %eax
@@ -641,24 +643,22 @@ define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
 define void @load_v3i1_broadcast_2_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
 ; AVX512-LABEL: load_v3i1_broadcast_2_v1i1_store:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movzbl (%rdi), %eax
-; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    btl $2, %eax
-; AVX512-NEXT:    movl $255, %eax
-; AVX512-NEXT:    cmovael %ecx, %eax
-; AVX512-NEXT:    kmovd %eax, %k0
+; AVX512-NEXT:    xorl %eax, %eax
+; AVX512-NEXT:    testb $4, (%rdi)
+; AVX512-NEXT:    movl $255, %ecx
+; AVX512-NEXT:    cmovel %eax, %ecx
+; AVX512-NEXT:    kmovd %ecx, %k0
 ; AVX512-NEXT:    kshiftrb $2, %k0, %k0
 ; AVX512-NEXT:    kmovb %k0, (%rsi)
 ; AVX512-NEXT:    retq
 ;
 ; AVX512NOTDQ-LABEL: load_v3i1_broadcast_2_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
-; AVX512NOTDQ-NEXT:    xorl %ecx, %ecx
-; AVX512NOTDQ-NEXT:    btl $2, %eax
-; AVX512NOTDQ-NEXT:    movl $255, %eax
-; AVX512NOTDQ-NEXT:    cmovael %ecx, %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
+; AVX512NOTDQ-NEXT:    xorl %eax, %eax
+; AVX512NOTDQ-NEXT:    testb $4, (%rdi)
+; AVX512NOTDQ-NEXT:    movl $255, %ecx
+; AVX512NOTDQ-NEXT:    cmovel %eax, %ecx
+; AVX512NOTDQ-NEXT:    kmovd %ecx, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $2, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kmovd %k0, %eax
 ; AVX512NOTDQ-NEXT:    movb %al, (%rsi)

diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 66bb6aa3d662..65009a77c4d4 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -49,11 +49,12 @@ define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
 ; SSE2-SSSE3:       # %bb.0:
 ; SSE2-SSSE3-NEXT:    movmskps %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    movl %eax, %ecx
-; SSE2-SSSE3-NEXT:    andl $3, %ecx
-; SSE2-SSSE3-NEXT:    movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT:    shrl $2, %eax
-; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
-; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT:    shrb $2, %cl
+; SSE2-SSSE3-NEXT:    movzbl %cl, %ecx
+; SSE2-SSSE3-NEXT:    andb $3, %al
+; SSE2-SSSE3-NEXT:    movzbl %al, %eax
+; SSE2-SSSE3-NEXT:    movd %eax, %xmm0
+; SSE2-SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    addb -{{[0-9]+}}(%rsp), %al
@@ -61,10 +62,10 @@ define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
 ;
 ; AVX12-LABEL: bitcast_v4i32_to_v2i2:
 ; AVX12:       # %bb.0:
-; AVX12-NEXT:    vmovmskps %xmm0, %ecx
-; AVX12-NEXT:    movl %ecx, %eax
-; AVX12-NEXT:    shrl $2, %eax
-; AVX12-NEXT:    andl $3, %ecx
+; AVX12-NEXT:    vmovmskps %xmm0, %eax
+; AVX12-NEXT:    movl %eax, %ecx
+; AVX12-NEXT:    shrb $2, %cl
+; AVX12-NEXT:    andb $3, %al
 ; AVX12-NEXT:    addb %cl, %al
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX12-NEXT:    retq
@@ -73,11 +74,10 @@ define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpgtd %xmm0, %xmm1, %k0
-; AVX512-NEXT:    kmovd %k0, %ecx
-; AVX512-NEXT:    movzbl %cl, %eax
-; AVX512-NEXT:    shrl $2, %eax
-; AVX512-NEXT:    andl $3, %eax
-; AVX512-NEXT:    andl $3, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    shrb $2, %cl
+; AVX512-NEXT:    andb $3, %al
 ; AVX512-NEXT:    addb %cl, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
@@ -94,13 +94,14 @@ define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind {
 ; SSE2-SSSE3:       # %bb.0:
 ; SSE2-SSSE3-NEXT:    packsswb %xmm0, %xmm0
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT:    movzbl %al, %ecx
-; SSE2-SSSE3-NEXT:    shrl $4, %ecx
-; SSE2-SSSE3-NEXT:    movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT:    andl $15, %eax
-; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
-; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-SSSE3-NEXT:    movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movl %eax, %ecx
+; SSE2-SSSE3-NEXT:    shrb $4, %cl
+; SSE2-SSSE3-NEXT:    movzbl %cl, %ecx
+; SSE2-SSSE3-NEXT:    andb $15, %al
+; SSE2-SSSE3-NEXT:    movzbl %al, %eax
+; SSE2-SSSE3-NEXT:    movd %eax, %xmm0
+; SSE2-SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    addb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    retq
@@ -108,10 +109,10 @@ define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind {
 ; AVX12-LABEL: bitcast_v8i16_to_v2i4:
 ; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
-; AVX12-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX12-NEXT:    movzbl %cl, %eax
-; AVX12-NEXT:    shrl $4, %eax
-; AVX12-NEXT:    andl $15, %ecx
+; AVX12-NEXT:    vpmovmskb %xmm0, %eax
+; AVX12-NEXT:    movl %eax, %ecx
+; AVX12-NEXT:    shrb $4, %cl
+; AVX12-NEXT:    andb $15, %al
 ; AVX12-NEXT:    addb %cl, %al
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX12-NEXT:    retq
@@ -119,10 +120,10 @@ define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind {
 ; AVX512-LABEL: bitcast_v8i16_to_v2i4:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovw2m %xmm0, %k0
-; AVX512-NEXT:    kmovd %k0, %ecx
-; AVX512-NEXT:    movzbl %cl, %eax
-; AVX512-NEXT:    shrl $4, %eax
-; AVX512-NEXT:    andl $15, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    shrb $4, %cl
+; AVX512-NEXT:    andb $15, %al
 ; AVX512-NEXT:    addb %cl, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
@@ -181,22 +182,23 @@ define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind {
 ; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    movmskps %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    movl %eax, %ecx
-; SSE2-SSSE3-NEXT:    shrl $2, %ecx
-; SSE2-SSSE3-NEXT:    movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT:    andl $3, %eax
-; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
-; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-SSSE3-NEXT:    movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    shrb $2, %cl
+; SSE2-SSSE3-NEXT:    movzbl %cl, %ecx
+; SSE2-SSSE3-NEXT:    andb $3, %al
+; SSE2-SSSE3-NEXT:    movzbl %al, %eax
+; SSE2-SSSE3-NEXT:    movd %eax, %xmm0
+; SSE2-SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    addb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX12-LABEL: bitcast_v4i64_to_v2i2:
 ; AVX12:       # %bb.0:
-; AVX12-NEXT:    vmovmskpd %ymm0, %ecx
-; AVX12-NEXT:    movl %ecx, %eax
-; AVX12-NEXT:    shrl $2, %eax
-; AVX12-NEXT:    andl $3, %ecx
+; AVX12-NEXT:    vmovmskpd %ymm0, %eax
+; AVX12-NEXT:    movl %eax, %ecx
+; AVX12-NEXT:    shrb $2, %cl
+; AVX12-NEXT:    andb $3, %al
 ; AVX12-NEXT:    addb %cl, %al
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX12-NEXT:    vzeroupper
@@ -206,11 +208,10 @@ define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpgtq %ymm0, %ymm1, %k0
-; AVX512-NEXT:    kmovd %k0, %ecx
-; AVX512-NEXT:    movzbl %cl, %eax
-; AVX512-NEXT:    shrl $2, %eax
-; AVX512-NEXT:    andl $3, %eax
-; AVX512-NEXT:    andl $3, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    shrb $2, %cl
+; AVX512-NEXT:    andb $3, %al
 ; AVX512-NEXT:    addb %cl, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
@@ -229,23 +230,24 @@ define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind {
 ; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    packsswb %xmm0, %xmm0
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT:    movzbl %al, %ecx
-; SSE2-SSSE3-NEXT:    shrl $4, %ecx
-; SSE2-SSSE3-NEXT:    movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT:    andl $15, %eax
-; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
-; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-SSSE3-NEXT:    movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movl %eax, %ecx
+; SSE2-SSSE3-NEXT:    shrb $4, %cl
+; SSE2-SSSE3-NEXT:    movzbl %cl, %ecx
+; SSE2-SSSE3-NEXT:    andb $15, %al
+; SSE2-SSSE3-NEXT:    movzbl %al, %eax
+; SSE2-SSSE3-NEXT:    movd %eax, %xmm0
+; SSE2-SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    addb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX12-LABEL: bitcast_v8i32_to_v2i4:
 ; AVX12:       # %bb.0:
-; AVX12-NEXT:    vmovmskps %ymm0, %ecx
-; AVX12-NEXT:    movl %ecx, %eax
-; AVX12-NEXT:    shrl $4, %eax
-; AVX12-NEXT:    andl $15, %ecx
+; AVX12-NEXT:    vmovmskps %ymm0, %eax
+; AVX12-NEXT:    movl %eax, %ecx
+; AVX12-NEXT:    shrb $4, %cl
+; AVX12-NEXT:    andb $15, %al
 ; AVX12-NEXT:    addb %cl, %al
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX12-NEXT:    vzeroupper
@@ -255,10 +257,10 @@ define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpgtd %ymm0, %ymm1, %k0
-; AVX512-NEXT:    kmovd %k0, %ecx
-; AVX512-NEXT:    movzbl %cl, %eax
-; AVX512-NEXT:    shrl $4, %eax
-; AVX512-NEXT:    andl $15, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    shrb $4, %cl
+; AVX512-NEXT:    andb $15, %al
 ; AVX512-NEXT:    addb %cl, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
@@ -391,13 +393,14 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind {
 ; SSE2-SSSE3-NEXT:    packssdw %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    packsswb %xmm0, %xmm0
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT:    movzbl %al, %ecx
-; SSE2-SSSE3-NEXT:    shrl $4, %ecx
-; SSE2-SSSE3-NEXT:    movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT:    andl $15, %eax
-; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
-; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-SSSE3-NEXT:    movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movl %eax, %ecx
+; SSE2-SSSE3-NEXT:    shrb $4, %cl
+; SSE2-SSSE3-NEXT:    movzbl %cl, %ecx
+; SSE2-SSSE3-NEXT:    andb $15, %al
+; SSE2-SSSE3-NEXT:    movzbl %al, %eax
+; SSE2-SSSE3-NEXT:    movd %eax, %xmm0
+; SSE2-SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    addb -{{[0-9]+}}(%rsp), %al
 ; SSE2-SSSE3-NEXT:    retq
@@ -412,10 +415,10 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovmskps %ymm0, %ecx
-; AVX1-NEXT:    movl %ecx, %eax
-; AVX1-NEXT:    shrl $4, %eax
-; AVX1-NEXT:    andl $15, %ecx
+; AVX1-NEXT:    vmovmskps %ymm0, %eax
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    shrb $4, %cl
+; AVX1-NEXT:    andb $15, %al
 ; AVX1-NEXT:    addb %cl, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
@@ -425,10 +428,10 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind {
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT:    vmovmskps %ymm0, %ecx
-; AVX2-NEXT:    movl %ecx, %eax
-; AVX2-NEXT:    shrl $4, %eax
-; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vmovmskps %ymm0, %eax
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    shrb $4, %cl
+; AVX2-NEXT:    andb $15, %al
 ; AVX2-NEXT:    addb %cl, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
@@ -438,10 +441,10 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpgtq %zmm0, %zmm1, %k0
-; AVX512-NEXT:    kmovd %k0, %ecx
-; AVX512-NEXT:    movzbl %cl, %eax
-; AVX512-NEXT:    shrl $4, %eax
-; AVX512-NEXT:    andl $15, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    shrb $4, %cl
+; AVX512-NEXT:    andb $15, %al
 ; AVX512-NEXT:    addb %cl, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index 78487bd162d1..b96f44ec3073 100644
--- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -542,7 +542,7 @@ define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
 define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper4xi64b:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movaps {{.*#+}} xmm2
+; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [NaN,0.0E+0,NaN,0.0E+0]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
@@ -805,48 +805,48 @@ define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
 ; AVX-NEXT:    pushq %rbx
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %r9
-; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; AVX-NEXT:    movq %r9, %r8
 ; AVX-NEXT:    shrq $56, %r8
 ; AVX-NEXT:    andl $15, %r8d
 ; AVX-NEXT:    movq %r9, %r10
 ; AVX-NEXT:    shrq $48, %r10
 ; AVX-NEXT:    andl $15, %r10d
-; AVX-NEXT:    movq %r9, %rsi
-; AVX-NEXT:    shrq $40, %rsi
-; AVX-NEXT:    andl $15, %esi
+; AVX-NEXT:    movq %rcx, %rdx
+; AVX-NEXT:    shldq $24, %r9, %rdx
+; AVX-NEXT:    andl $15, %edx
 ; AVX-NEXT:    movq %r9, %r11
 ; AVX-NEXT:    shrq $32, %r11
 ; AVX-NEXT:    andl $15, %r11d
-; AVX-NEXT:    movq %rdx, %rdi
+; AVX-NEXT:    movq %rcx, %rdi
 ; AVX-NEXT:    shrq $56, %rdi
 ; AVX-NEXT:    andl $15, %edi
-; AVX-NEXT:    movq %rdx, %rax
-; AVX-NEXT:    shrq $48, %rax
+; AVX-NEXT:    movq %rcx, %rsi
+; AVX-NEXT:    shrq $48, %rsi
+; AVX-NEXT:    andl $15, %esi
+; AVX-NEXT:    movq %rcx, %rax
+; AVX-NEXT:    shrq $40, %rax
 ; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    movq %rdx, %rcx
-; AVX-NEXT:    shrq $40, %rcx
-; AVX-NEXT:    andl $15, %ecx
-; AVX-NEXT:    movq %rdx, %rbx
+; AVX-NEXT:    movq %rcx, %rbx
 ; AVX-NEXT:    shrq $32, %rbx
 ; AVX-NEXT:    andl $15, %ebx
 ; AVX-NEXT:    shlq $32, %rbx
-; AVX-NEXT:    andl $252645135, %edx # imm = 0xF0F0F0F
-; AVX-NEXT:    orq %rbx, %rdx
-; AVX-NEXT:    shlq $40, %rcx
-; AVX-NEXT:    orq %rdx, %rcx
-; AVX-NEXT:    shlq $48, %rax
+; AVX-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX-NEXT:    orq %rbx, %rcx
+; AVX-NEXT:    shlq $40, %rax
 ; AVX-NEXT:    orq %rcx, %rax
+; AVX-NEXT:    shlq $48, %rsi
+; AVX-NEXT:    orq %rax, %rsi
 ; AVX-NEXT:    shlq $56, %rdi
-; AVX-NEXT:    orq %rax, %rdi
+; AVX-NEXT:    orq %rsi, %rdi
 ; AVX-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    shlq $32, %r11
 ; AVX-NEXT:    andl $252645135, %r9d # imm = 0xF0F0F0F
 ; AVX-NEXT:    orq %r11, %r9
-; AVX-NEXT:    shlq $40, %rsi
-; AVX-NEXT:    orq %r9, %rsi
+; AVX-NEXT:    shlq $40, %rdx
+; AVX-NEXT:    orq %r9, %rdx
 ; AVX-NEXT:    shlq $48, %r10
-; AVX-NEXT:    orq %rsi, %r10
+; AVX-NEXT:    orq %rdx, %r10
 ; AVX-NEXT:    shlq $56, %r8
 ; AVX-NEXT:    orq %r10, %r8
 ; AVX-NEXT:    movq %r8, -{{[0-9]+}}(%rsp)
@@ -986,96 +986,96 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    movq %rax, %r8
 ; AVX1-NEXT:    movq %rax, %rdx
 ; AVX1-NEXT:    movq %rax, %rsi
 ; AVX1-NEXT:    movq %rax, %rdi
-; AVX1-NEXT:    shrq $32, %rdi
-; AVX1-NEXT:    andl $15, %edi
-; AVX1-NEXT:    shlq $32, %rdi
+; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    shrq $32, %rcx
+; AVX1-NEXT:    andl $15, %ecx
+; AVX1-NEXT:    shlq $32, %rcx
 ; AVX1-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; AVX1-NEXT:    orq %rdi, %rax
-; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %rdi
-; AVX1-NEXT:    shrq $40, %rsi
+; AVX1-NEXT:    orq %rcx, %rax
+; AVX1-NEXT:    shrq $40, %rdi
+; AVX1-NEXT:    andl $15, %edi
+; AVX1-NEXT:    shlq $40, %rdi
+; AVX1-NEXT:    orq %rax, %rdi
+; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    shrq $48, %rsi
 ; AVX1-NEXT:    andl $15, %esi
-; AVX1-NEXT:    shlq $40, %rsi
-; AVX1-NEXT:    orq %rax, %rsi
-; AVX1-NEXT:    movq %rdi, %rax
-; AVX1-NEXT:    shrq $48, %rdx
+; AVX1-NEXT:    shlq $48, %rsi
+; AVX1-NEXT:    orq %rdi, %rsi
+; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    shrq $56, %rdx
 ; AVX1-NEXT:    andl $15, %edx
-; AVX1-NEXT:    shlq $48, %rdx
+; AVX1-NEXT:    shlq $56, %rdx
 ; AVX1-NEXT:    orq %rsi, %rdx
-; AVX1-NEXT:    movq %rdi, %rsi
+; AVX1-NEXT:    movq %rax, %rsi
+; AVX1-NEXT:    shldq $24, %rax, %r8
+; AVX1-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %rax, %rdx
+; AVX1-NEXT:    shrq $32, %rdx
+; AVX1-NEXT:    andl $15, %edx
+; AVX1-NEXT:    shlq $32, %rdx
+; AVX1-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; AVX1-NEXT:    orq %rdx, %rax
+; AVX1-NEXT:    andl $15, %r8d
+; AVX1-NEXT:    shlq $40, %r8
+; AVX1-NEXT:    orq %rax, %r8
+; AVX1-NEXT:    shrq $48, %rsi
+; AVX1-NEXT:    andl $15, %esi
+; AVX1-NEXT:    shlq $48, %rsi
+; AVX1-NEXT:    orq %r8, %rsi
 ; AVX1-NEXT:    shrq $56, %rcx
 ; AVX1-NEXT:    andl $15, %ecx
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    shlq $56, %rcx
-; AVX1-NEXT:    orq %rdx, %rcx
-; AVX1-NEXT:    movq %rdi, %rdx
+; AVX1-NEXT:    orq %rsi, %rcx
+; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movq %rdi, %rcx
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    shrl $8, %ecx
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    shrl $16, %ecx
+; AVX1-NEXT:    vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    shrl $24, %ecx
+; AVX1-NEXT:    vpinsrb $3, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    andl $15, %ecx
-; AVX1-NEXT:    shlq $32, %rcx
-; AVX1-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; AVX1-NEXT:    orq %rcx, %rdi
-; AVX1-NEXT:    shrq $40, %rdx
-; AVX1-NEXT:    andl $15, %edx
-; AVX1-NEXT:    shlq $40, %rdx
-; AVX1-NEXT:    orq %rdi, %rdx
-; AVX1-NEXT:    shrq $48, %rsi
-; AVX1-NEXT:    andl $15, %esi
-; AVX1-NEXT:    shlq $48, %rsi
-; AVX1-NEXT:    orq %rdx, %rsi
+; AVX1-NEXT:    vpinsrb $4, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    shrq $40, %rcx
+; AVX1-NEXT:    vpinsrb $5, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    shrq $48, %rcx
+; AVX1-NEXT:    vpinsrb $6, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rcx
 ; AVX1-NEXT:    shrq $56, %rax
-; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    shlq $56, %rax
-; AVX1-NEXT:    orq %rsi, %rax
-; AVX1-NEXT:    vmovq %xmm0, %rcx
-; AVX1-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm0
 ; AVX1-NEXT:    movl %ecx, %eax
 ; AVX1-NEXT:    shrl $8, %eax
-; AVX1-NEXT:    vmovd %ecx, %xmm1
-; AVX1-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    movl %ecx, %eax
 ; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    movl %ecx, %eax
 ; AVX1-NEXT:    shrl $24, %eax
-; AVX1-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    movq %rcx, %rax
 ; AVX1-NEXT:    shrq $32, %rax
-; AVX1-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    movq %rcx, %rax
 ; AVX1-NEXT:    shrq $40, %rax
-; AVX1-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    movq %rcx, %rax
 ; AVX1-NEXT:    shrq $48, %rax
-; AVX1-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    shrq $56, %rcx
-; AVX1-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $8, %ecx
-; AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $16, %ecx
-; AVX1-NEXT:    vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $24, %ecx
-; AVX1-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $40, %rcx
-; AVX1-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $48, %rcx
-; AVX1-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    shrq $56, %rax
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrb $15, %ecx, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
@@ -1084,96 +1084,96 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    movq %rax, %r8
 ; AVX2-NEXT:    movq %rax, %rdx
 ; AVX2-NEXT:    movq %rax, %rsi
 ; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    shrq $32, %rdi
-; AVX2-NEXT:    andl $15, %edi
-; AVX2-NEXT:    shlq $32, %rdi
+; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    shrq $32, %rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    shlq $32, %rcx
 ; AVX2-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; AVX2-NEXT:    orq %rdi, %rax
-; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %rdi
-; AVX2-NEXT:    shrq $40, %rsi
+; AVX2-NEXT:    orq %rcx, %rax
+; AVX2-NEXT:    shrq $40, %rdi
+; AVX2-NEXT:    andl $15, %edi
+; AVX2-NEXT:    shlq $40, %rdi
+; AVX2-NEXT:    orq %rax, %rdi
+; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    shrq $48, %rsi
 ; AVX2-NEXT:    andl $15, %esi
-; AVX2-NEXT:    shlq $40, %rsi
-; AVX2-NEXT:    orq %rax, %rsi
-; AVX2-NEXT:    movq %rdi, %rax
-; AVX2-NEXT:    shrq $48, %rdx
+; AVX2-NEXT:    shlq $48, %rsi
+; AVX2-NEXT:    orq %rdi, %rsi
+; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    shrq $56, %rdx
 ; AVX2-NEXT:    andl $15, %edx
-; AVX2-NEXT:    shlq $48, %rdx
+; AVX2-NEXT:    shlq $56, %rdx
 ; AVX2-NEXT:    orq %rsi, %rdx
-; AVX2-NEXT:    movq %rdi, %rsi
+; AVX2-NEXT:    movq %rax, %rsi
+; AVX2-NEXT:    shldq $24, %rax, %r8
+; AVX2-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %rax, %rdx
+; AVX2-NEXT:    shrq $32, %rdx
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    shlq $32, %rdx
+; AVX2-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; AVX2-NEXT:    orq %rdx, %rax
+; AVX2-NEXT:    andl $15, %r8d
+; AVX2-NEXT:    shlq $40, %r8
+; AVX2-NEXT:    orq %rax, %r8
+; AVX2-NEXT:    shrq $48, %rsi
+; AVX2-NEXT:    andl $15, %esi
+; AVX2-NEXT:    shlq $48, %rsi
+; AVX2-NEXT:    orq %r8, %rsi
 ; AVX2-NEXT:    shrq $56, %rcx
 ; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    shlq $56, %rcx
-; AVX2-NEXT:    orq %rdx, %rcx
-; AVX2-NEXT:    movq %rdi, %rdx
+; AVX2-NEXT:    orq %rsi, %rcx
+; AVX2-NEXT:    vmovq %xmm0, %rax
 ; AVX2-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movq %rdi, %rcx
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    shrl $8, %ecx
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    shrl $16, %ecx
+; AVX2-NEXT:    vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    shrl $24, %ecx
+; AVX2-NEXT:    vpinsrb $3, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq $32, %rcx
-; AVX2-NEXT:    andl $15, %ecx
-; AVX2-NEXT:    shlq $32, %rcx
-; AVX2-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; AVX2-NEXT:    orq %rcx, %rdi
-; AVX2-NEXT:    shrq $40, %rdx
-; AVX2-NEXT:    andl $15, %edx
-; AVX2-NEXT:    shlq $40, %rdx
-; AVX2-NEXT:    orq %rdi, %rdx
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    andl $15, %esi
-; AVX2-NEXT:    shlq $48, %rsi
-; AVX2-NEXT:    orq %rdx, %rsi
+; AVX2-NEXT:    vpinsrb $4, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    shrq $40, %rcx
+; AVX2-NEXT:    vpinsrb $5, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    shrq $48, %rcx
+; AVX2-NEXT:    vpinsrb $6, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rcx
 ; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    shlq $56, %rax
-; AVX2-NEXT:    orq %rsi, %rax
-; AVX2-NEXT:    vmovq %xmm0, %rcx
-; AVX2-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm0
 ; AVX2-NEXT:    movl %ecx, %eax
 ; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vmovd %ecx, %xmm1
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    movl %ecx, %eax
 ; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    movl %ecx, %eax
 ; AVX2-NEXT:    shrl $24, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    movq %rcx, %rax
 ; AVX2-NEXT:    shrq $32, %rax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    movq %rcx, %rax
 ; AVX2-NEXT:    shrq $40, %rax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    movq %rcx, %rax
 ; AVX2-NEXT:    shrq $48, %rax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    shrq $56, %rcx
-; AVX2-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $8, %ecx
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $16, %ecx
-; AVX2-NEXT:    vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $24, %ecx
-; AVX2-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $32, %rcx
-; AVX2-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $40, %rcx
-; AVX2-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $48, %rcx
-; AVX2-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrb $15, %ecx, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/load-local-v3i1.ll b/llvm/test/CodeGen/X86/load-local-v3i1.ll
index da432cb1ab34..f471c637a592 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i1.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i1.ll
@@ -98,20 +98,23 @@ define void @local_load_v3i1(i32 addrspace(1)* %out, i32 addrspace(1)* %in, <3 x
 ; CHECK-NEXT:    movq %rdi, %r14
 ; CHECK-NEXT:    movb (%rdx), %al
 ; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    shrb $2, %cl
-; CHECK-NEXT:    movzbl %al, %r15d
-; CHECK-NEXT:    shrb %al
-; CHECK-NEXT:    movzbl %al, %ebx
-; CHECK-NEXT:    movzbl %cl, %ebp
+; CHECK-NEXT:    shrb %cl
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    shrb $2, %dl
+; CHECK-NEXT:    andb $1, %al
+; CHECK-NEXT:    movzbl %al, %ebp
+; CHECK-NEXT:    movzbl %dl, %r15d
+; CHECK-NEXT:    movzbl %cl, %ebx
 ; CHECK-NEXT:    movq %rsi, %rdi
-; CHECK-NEXT:    movl %r15d, %esi
+; CHECK-NEXT:    movl %ebp, %esi
 ; CHECK-NEXT:    movl %ebx, %edx
-; CHECK-NEXT:    movl %ebp, %ecx
+; CHECK-NEXT:    movl %r15d, %ecx
 ; CHECK-NEXT:    callq masked_load_v3
 ; CHECK-NEXT:    movq %r14, %rdi
-; CHECK-NEXT:    movl %r15d, %esi
+; CHECK-NEXT:    movl %ebp, %esi
 ; CHECK-NEXT:    movl %ebx, %edx
-; CHECK-NEXT:    movl %ebp, %ecx
+; CHECK-NEXT:    movl %r15d, %ecx
 ; CHECK-NEXT:    callq masked_store4_v3
 ; CHECK-NEXT:    addq $8, %rsp
 ; CHECK-NEXT:    popq %rbx

diff  --git a/llvm/test/CodeGen/X86/load-local-v4i5.ll b/llvm/test/CodeGen/X86/load-local-v4i5.ll
new file mode 100644
index 000000000000..cb382a59436e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/load-local-v4i5.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+@0 = internal unnamed_addr constant [4 x i5] [i5 2, i5 0, i5 2, i5 -1], align 1
+
+; Function Attrs: nobuiltin nounwind
+define void @_start() {
+; CHECK-LABEL: _start:
+; CHECK:       # %bb.0: # %Entry
+; CHECK-NEXT:    movl {{.*}}(%rip), %eax
+; CHECK-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb -{{[0-9]+}}(%rsp), %cl
+; CHECK-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT:    andl $31, %eax
+; CHECK-NEXT:    andl $31, %esi
+; CHECK-NEXT:    shll $5, %esi
+; CHECK-NEXT:    orl %eax, %esi
+; CHECK-NEXT:    andl $31, %edx
+; CHECK-NEXT:    shll $10, %edx
+; CHECK-NEXT:    orl %esi, %edx
+; CHECK-NEXT:    movzbl %cl, %eax
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    shll $15, %ecx
+; CHECK-NEXT:    orl %edx, %ecx
+; CHECK-NEXT:    movw %cx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    shrl $16, %ecx
+; CHECK-NEXT:    andl $15, %ecx
+; CHECK-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    cmpb $31, %al
+; CHECK-NEXT:    je .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %Then
+; CHECK-NEXT:    int3
+; CHECK-NEXT:  .LBB0_2: # %EndIf
+; CHECK-NEXT:    retq
+Entry:
+  %x = alloca [4 x i5], align 1
+  %y = alloca <4 x i5>, align 4
+  %z = alloca i5, align 1
+  %0 = bitcast [4 x i5]* %x to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 bitcast ([4 x i5]* @0 to i8*), i64 4, i1 false)
+  %1 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 0
+  %2 = load i5, i5* %1
+  %3 = insertelement <4 x i5> undef, i5 %2, i32 0
+  %4 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 1
+  %5 = load i5, i5* %4
+  %6 = insertelement <4 x i5> %3, i5 %5, i32 1
+  %7 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 2
+  %8 = load i5, i5* %7
+  %9 = insertelement <4 x i5> %6, i5 %8, i32 2
+  %10 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 3
+  %11 = load i5, i5* %10
+  %12 = insertelement <4 x i5> %9, i5 %11, i32 3
+  store <4 x i5> %12, <4 x i5>* %y, align 4
+  %13 = load <4 x i5>, <4 x i5>* %y
+  %14 = extractelement <4 x i5> %13, i32 3
+  store i5 %14, i5* %z, align 1
+  %15 = load i5, i5* %z, align 1
+  %16 = icmp ne i5 %15, -1
+  br i1 %16, label %Then, label %Else
+
+Then:                                             ; preds = %Entry
+  call void @llvm.debugtrap()
+  br label %EndIf
+
+Else:                                             ; preds = %Entry
+  br label %EndIf
+
+EndIf:                                            ; preds = %Else, %Then
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind willreturn
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+
+; Function Attrs: nounwind
+declare void @llvm.debugtrap()

diff  --git a/llvm/test/CodeGen/X86/pr15267.ll b/llvm/test/CodeGen/X86/pr15267.ll
index b515fe8c4863..73acb76ce55f 100644
--- a/llvm/test/CodeGen/X86/pr15267.ll
+++ b/llvm/test/CodeGen/X86/pr15267.ll
@@ -7,14 +7,18 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
 ; CHECK-NEXT:    movzwl (%rdi), %eax
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $3, %ecx
-; CHECK-NEXT:    vmovd %eax, %xmm0
-; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
+; CHECK-NEXT:    andl $7, %ecx
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    andl $7, %edx
+; CHECK-NEXT:    vmovd %edx, %xmm0
+; CHECK-NEXT:    vpinsrw $2, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $6, %ecx
-; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; CHECK-NEXT:    andl $7, %ecx
+; CHECK-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    shrl $9, %eax
-; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    andl $7, %eax
+; CHECK-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %ret = load <4 x i3>, <4 x i3>* %in, align 1
   ret <4 x i3> %ret
@@ -23,17 +27,24 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
 define <4 x i1> @test2(<4 x i1>* %in) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    movb (%rdi), %al
 ; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    shrl %ecx
-; CHECK-NEXT:    vmovd %eax, %xmm0
-; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
+; CHECK-NEXT:    shrb %cl
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    movzbl %cl, %ecx
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    andb $1, %dl
+; CHECK-NEXT:    movzbl %dl, %edx
+; CHECK-NEXT:    vmovd %edx, %xmm0
+; CHECK-NEXT:    vpinsrb $4, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    shrl $2, %ecx
-; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; CHECK-NEXT:    shrl $3, %eax
-; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    shrb $2, %cl
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    movzbl %cl, %ecx
+; CHECK-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
+; CHECK-NEXT:    shrb $3, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %ret = load <4 x i1>, <4 x i1>* %in, align 1
   ret <4 x i1> %ret
@@ -42,21 +53,26 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind {
 define <4 x i64> @test3(<4 x i1>* %in) nounwind {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    movq %rax, %rcx
-; CHECK-NEXT:    shlq $62, %rcx
-; CHECK-NEXT:    sarq $63, %rcx
-; CHECK-NEXT:    movq %rax, %rdx
-; CHECK-NEXT:    shlq $63, %rdx
-; CHECK-NEXT:    sarq $63, %rdx
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    shrb %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl %ecx, %edx
+; CHECK-NEXT:    andl $1, %edx
+; CHECK-NEXT:    negl %edx
 ; CHECK-NEXT:    vmovd %edx, %xmm0
-; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; CHECK-NEXT:    movq %rax, %rcx
-; CHECK-NEXT:    shlq $61, %rcx
-; CHECK-NEXT:    sarq $63, %rcx
-; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; CHECK-NEXT:    shlq $60, %rax
-; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    shrb $2, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; CHECK-NEXT:    shrb $3, %cl
+; CHECK-NEXT:    movzbl %cl, %eax
+; CHECK-NEXT:    negl %eax
 ; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm1
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]

diff  --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 999fe4a731d9..d162f5c4a97a 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1342,41 +1342,44 @@ entry:
 define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_2i1_to_2i64:
 ; SSE:       # %bb.0: # %entry
-; SSE-NEXT:    movzbl (%rdi), %eax
-; SSE-NEXT:    movq %rax, %rcx
-; SSE-NEXT:    shlq $62, %rcx
-; SSE-NEXT:    movq %rcx, %xmm0
-; SSE-NEXT:    shlq $63, %rax
+; SSE-NEXT:    movb (%rdi), %al
+; SSE-NEXT:    movzbl %al, %ecx
+; SSE-NEXT:    shrb %al
+; SSE-NEXT:    movzbl %al, %eax
+; SSE-NEXT:    negq %rax
 ; SSE-NEXT:    movq %rax, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE-NEXT:    andl $1, %ecx
+; SSE-NEXT:    negq %rcx
+; SSE-NEXT:    movq %rcx, %xmm0
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_sext_2i1_to_2i64:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    movzbl (%rdi), %eax
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shlq $62, %rcx
-; AVX1-NEXT:    vmovq %rcx, %xmm0
-; AVX1-NEXT:    shlq $63, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm1
+; AVX1-NEXT:    movb (%rdi), %al
+; AVX1-NEXT:    movzbl %al, %ecx
+; AVX1-NEXT:    shrb %al
+; AVX1-NEXT:    movzbl %al, %eax
+; AVX1-NEXT:    negq %rax
+; AVX1-NEXT:    vmovq %rax, %xmm0
+; AVX1-NEXT:    andl $1, %ecx
+; AVX1-NEXT:    negq %rcx
+; AVX1-NEXT:    vmovq %rcx, %xmm1
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_sext_2i1_to_2i64:
 ; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    movzbl (%rdi), %eax
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $62, %rcx
-; AVX2-NEXT:    vmovq %rcx, %xmm0
-; AVX2-NEXT:    shlq $63, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm1
+; AVX2-NEXT:    movb (%rdi), %al
+; AVX2-NEXT:    movzbl %al, %ecx
+; AVX2-NEXT:    shrb %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    negq %rax
+; AVX2-NEXT:    vmovq %rax, %xmm0
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    negq %rcx
+; AVX2-NEXT:    vmovq %rcx, %xmm1
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_sext_2i1_to_2i64:
@@ -1390,30 +1393,34 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_2i1_to_2i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movzbl (%eax), %eax
-; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shll $30, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
-; X32-SSE2-NEXT:    shll $31, %eax
+; X32-SSE2-NEXT:    movb (%eax), %al
+; X32-SSE2-NEXT:    movzbl %al, %ecx
+; X32-SSE2-NEXT:    shrb %al
+; X32-SSE2-NEXT:    movzbl %al, %eax
+; X32-SSE2-NEXT:    negl %eax
 ; X32-SSE2-NEXT:    movd %eax, %xmm0
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; X32-SSE2-NEXT:    andl $1, %ecx
+; X32-SSE2-NEXT:    negl %ecx
+; X32-SSE2-NEXT:    movd %ecx, %xmm0
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    psrad $31, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_2i1_to_2i64:
 ; X32-SSE41:       # %bb.0: # %entry
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movzbl (%eax), %eax
-; X32-SSE41-NEXT:    movl %eax, %ecx
-; X32-SSE41-NEXT:    shll $31, %ecx
+; X32-SSE41-NEXT:    movb (%eax), %al
+; X32-SSE41-NEXT:    movzbl %al, %ecx
+; X32-SSE41-NEXT:    andl $1, %ecx
+; X32-SSE41-NEXT:    negl %ecx
 ; X32-SSE41-NEXT:    movd %ecx, %xmm0
 ; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
-; X32-SSE41-NEXT:    shll $30, %eax
+; X32-SSE41-NEXT:    shrb %al
+; X32-SSE41-NEXT:    movzbl %al, %eax
+; X32-SSE41-NEXT:    negl %eax
 ; X32-SSE41-NEXT:    pinsrd $2, %eax, %xmm0
 ; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT:    psrad $31, %xmm0
 ; X32-SSE41-NEXT:    retl
 entry:
  %X = load <2 x i1>, <2 x i1>* %ptr
@@ -1483,107 +1490,132 @@ entry:
 define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
 ; SSE2-LABEL: load_sext_4i1_to_4i32:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movl (%rdi), %eax
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    shlq $60, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
+; SSE2-NEXT:    movb (%rdi), %al
+; SSE2-NEXT:    movl %eax, %ecx
+; SSE2-NEXT:    shrb $3, %cl
+; SSE2-NEXT:    movzbl %cl, %ecx
+; SSE2-NEXT:    negl %ecx
 ; SSE2-NEXT:    movd %ecx, %xmm0
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    shlq $61, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    movzbl %al, %ecx
+; SSE2-NEXT:    shrb $2, %al
+; SSE2-NEXT:    movzbl %al, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    negl %eax
+; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    shlq $62, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    movd %ecx, %xmm2
-; SSE2-NEXT:    shlq $63, %rax
-; SSE2-NEXT:    sarq $63, %rax
+; SSE2-NEXT:    movl %ecx, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    negl %eax
 ; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    shrb %cl
+; SSE2-NEXT:    movzbl %cl, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    negl %eax
+; SSE2-NEXT:    movd %eax, %xmm2
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i1_to_4i32:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movl (%rdi), %eax
-; SSSE3-NEXT:    movq %rax, %rcx
-; SSSE3-NEXT:    shlq $60, %rcx
-; SSSE3-NEXT:    sarq $63, %rcx
+; SSSE3-NEXT:    movb (%rdi), %al
+; SSSE3-NEXT:    movl %eax, %ecx
+; SSSE3-NEXT:    shrb $3, %cl
+; SSSE3-NEXT:    movzbl %cl, %ecx
+; SSSE3-NEXT:    negl %ecx
 ; SSSE3-NEXT:    movd %ecx, %xmm0
-; SSSE3-NEXT:    movq %rax, %rcx
-; SSSE3-NEXT:    shlq $61, %rcx
-; SSSE3-NEXT:    sarq $63, %rcx
-; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    movzbl %al, %ecx
+; SSSE3-NEXT:    shrb $2, %al
+; SSSE3-NEXT:    movzbl %al, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    negl %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT:    movq %rax, %rcx
-; SSSE3-NEXT:    shlq $62, %rcx
-; SSSE3-NEXT:    sarq $63, %rcx
-; SSSE3-NEXT:    movd %ecx, %xmm2
-; SSSE3-NEXT:    shlq $63, %rax
-; SSSE3-NEXT:    sarq $63, %rax
+; SSSE3-NEXT:    movl %ecx, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    negl %eax
 ; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    shrb %cl
+; SSSE3-NEXT:    movzbl %cl, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    negl %eax
+; SSSE3-NEXT:    movd %eax, %xmm2
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i1_to_4i32:
 ; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    movl (%rdi), %eax
-; SSE41-NEXT:    movq %rax, %rcx
-; SSE41-NEXT:    shlq $62, %rcx
-; SSE41-NEXT:    sarq $63, %rcx
-; SSE41-NEXT:    movq %rax, %rdx
-; SSE41-NEXT:    shlq $63, %rdx
-; SSE41-NEXT:    sarq $63, %rdx
+; SSE41-NEXT:    movb (%rdi), %al
+; SSE41-NEXT:    movzbl %al, %ecx
+; SSE41-NEXT:    shrb %al
+; SSE41-NEXT:    movzbl %al, %eax
+; SSE41-NEXT:    andl $1, %eax
+; SSE41-NEXT:    negl %eax
+; SSE41-NEXT:    movl %ecx, %edx
+; SSE41-NEXT:    andl $1, %edx
+; SSE41-NEXT:    negl %edx
 ; SSE41-NEXT:    movd %edx, %xmm0
-; SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
-; SSE41-NEXT:    movq %rax, %rcx
-; SSE41-NEXT:    shlq $61, %rcx
-; SSE41-NEXT:    sarq $63, %rcx
-; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
-; SSE41-NEXT:    shlq $60, %rax
-; SSE41-NEXT:    sarq $63, %rax
+; SSE41-NEXT:    pinsrd $1, %eax, %xmm0
+; SSE41-NEXT:    movl %ecx, %eax
+; SSE41-NEXT:    shrb $2, %al
+; SSE41-NEXT:    movzbl %al, %eax
+; SSE41-NEXT:    andl $1, %eax
+; SSE41-NEXT:    negl %eax
+; SSE41-NEXT:    pinsrd $2, %eax, %xmm0
+; SSE41-NEXT:    shrb $3, %cl
+; SSE41-NEXT:    movzbl %cl, %eax
+; SSE41-NEXT:    negl %eax
 ; SSE41-NEXT:    pinsrd $3, %eax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_sext_4i1_to_4i32:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    movl (%rdi), %eax
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shlq $62, %rcx
-; AVX1-NEXT:    sarq $63, %rcx
-; AVX1-NEXT:    movq %rax, %rdx
-; AVX1-NEXT:    shlq $63, %rdx
-; AVX1-NEXT:    sarq $63, %rdx
+; AVX1-NEXT:    movb (%rdi), %al
+; AVX1-NEXT:    movzbl %al, %ecx
+; AVX1-NEXT:    shrb %al
+; AVX1-NEXT:    movzbl %al, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    negl %eax
+; AVX1-NEXT:    movl %ecx, %edx
+; AVX1-NEXT:    andl $1, %edx
+; AVX1-NEXT:    negl %edx
 ; AVX1-NEXT:    vmovd %edx, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shlq $61, %rcx
-; AVX1-NEXT:    sarq $63, %rcx
-; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    shlq $60, %rax
-; AVX1-NEXT:    sarq $63, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movl %ecx, %eax
+; AVX1-NEXT:    shrb $2, %al
+; AVX1-NEXT:    movzbl %al, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    negl %eax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    shrb $3, %cl
+; AVX1-NEXT:    movzbl %cl, %eax
+; AVX1-NEXT:    negl %eax
 ; AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_sext_4i1_to_4i32:
 ; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    movl (%rdi), %eax
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $62, %rcx
-; AVX2-NEXT:    sarq $63, %rcx
-; AVX2-NEXT:    movq %rax, %rdx
-; AVX2-NEXT:    shlq $63, %rdx
-; AVX2-NEXT:    sarq $63, %rdx
+; AVX2-NEXT:    movb (%rdi), %al
+; AVX2-NEXT:    movzbl %al, %ecx
+; AVX2-NEXT:    shrb %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    negl %eax
+; AVX2-NEXT:    movl %ecx, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    negl %edx
 ; AVX2-NEXT:    vmovd %edx, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $61, %rcx
-; AVX2-NEXT:    sarq $63, %rcx
-; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    shlq $60, %rax
-; AVX2-NEXT:    sarq $63, %rax
+; AVX2-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    shrb $2, %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    negl %eax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    shrb $3, %cl
+; AVX2-NEXT:    movzbl %cl, %eax
+; AVX2-NEXT:    negl %eax
 ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
@@ -1598,40 +1630,56 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i32:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movl (%eax), %eax
+; X32-SSE2-NEXT:    movb (%eax), %al
 ; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shll $28, %ecx
+; X32-SSE2-NEXT:    shrb $3, %cl
+; X32-SSE2-NEXT:    movzbl %cl, %ecx
+; X32-SSE2-NEXT:    negl %ecx
 ; X32-SSE2-NEXT:    movd %ecx, %xmm0
 ; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shll $29, %ecx
+; X32-SSE2-NEXT:    shrb $2, %cl
+; X32-SSE2-NEXT:    movzbl %cl, %ecx
+; X32-SSE2-NEXT:    andl $1, %ecx
+; X32-SSE2-NEXT:    negl %ecx
 ; X32-SSE2-NEXT:    movd %ecx, %xmm1
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shll $30, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    shll $31, %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm0
+; X32-SSE2-NEXT:    movzbl %al, %ecx
+; X32-SSE2-NEXT:    andl $1, %ecx
+; X32-SSE2-NEXT:    negl %ecx
+; X32-SSE2-NEXT:    movd %ecx, %xmm0
+; X32-SSE2-NEXT:    shrb %al
+; X32-SSE2-NEXT:    movzbl %al, %eax
+; X32-SSE2-NEXT:    andl $1, %eax
+; X32-SSE2-NEXT:    negl %eax
+; X32-SSE2-NEXT:    movd %eax, %xmm2
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    psrad $31, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i1_to_4i32:
 ; X32-SSE41:       # %bb.0: # %entry
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movl (%eax), %eax
+; X32-SSE41-NEXT:    movb (%eax), %al
 ; X32-SSE41-NEXT:    movl %eax, %ecx
-; X32-SSE41-NEXT:    shll $30, %ecx
-; X32-SSE41-NEXT:    movl %eax, %edx
-; X32-SSE41-NEXT:    shll $31, %edx
+; X32-SSE41-NEXT:    shrb %cl
+; X32-SSE41-NEXT:    movzbl %cl, %ecx
+; X32-SSE41-NEXT:    andl $1, %ecx
+; X32-SSE41-NEXT:    negl %ecx
+; X32-SSE41-NEXT:    movzbl %al, %edx
+; X32-SSE41-NEXT:    andl $1, %edx
+; X32-SSE41-NEXT:    negl %edx
 ; X32-SSE41-NEXT:    movd %edx, %xmm0
 ; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
 ; X32-SSE41-NEXT:    movl %eax, %ecx
-; X32-SSE41-NEXT:    shll $29, %ecx
+; X32-SSE41-NEXT:    shrb $2, %cl
+; X32-SSE41-NEXT:    movzbl %cl, %ecx
+; X32-SSE41-NEXT:    andl $1, %ecx
+; X32-SSE41-NEXT:    negl %ecx
 ; X32-SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
-; X32-SSE41-NEXT:    shll $28, %eax
+; X32-SSE41-NEXT:    shrb $3, %al
+; X32-SSE41-NEXT:    movzbl %al, %eax
+; X32-SSE41-NEXT:    negl %eax
 ; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT:    psrad $31, %xmm0
 ; X32-SSE41-NEXT:    retl
 entry:
  %X = load <4 x i1>, <4 x i1>* %ptr
@@ -1689,25 +1737,29 @@ entry:
 define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ; SSE2-LABEL: load_sext_4i1_to_4i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movl (%rdi), %eax
+; SSE2-NEXT:    movb (%rdi), %al
 ; SSE2-NEXT:    movl %eax, %ecx
-; SSE2-NEXT:    shrl $3, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    shrb %cl
+; SSE2-NEXT:    andb $1, %cl
+; SSE2-NEXT:    movzbl %cl, %ecx
+; SSE2-NEXT:    movl %eax, %edx
+; SSE2-NEXT:    andb $1, %dl
+; SSE2-NEXT:    movzbl %dl, %edx
+; SSE2-NEXT:    movd %edx, %xmm1
+; SSE2-NEXT:    pinsrw $2, %ecx, %xmm1
 ; SSE2-NEXT:    movl %eax, %ecx
-; SSE2-NEXT:    shrl $2, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    shrl %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE2-NEXT:    shrb $2, %cl
+; SSE2-NEXT:    andb $1, %cl
+; SSE2-NEXT:    movzbl %cl, %ecx
+; SSE2-NEXT:    pinsrw $4, %ecx, %xmm1
+; SSE2-NEXT:    shrb $3, %al
+; SSE2-NEXT:    movzbl %al, %eax
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; SSE2-NEXT:    psllq $63, %xmm0
 ; SSE2-NEXT:    psrad $31, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; SSE2-NEXT:    psllq $63, %xmm1
 ; SSE2-NEXT:    psrad $31, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -1715,25 +1767,29 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ;
 ; SSSE3-LABEL: load_sext_4i1_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movl (%rdi), %eax
+; SSSE3-NEXT:    movb (%rdi), %al
 ; SSSE3-NEXT:    movl %eax, %ecx
-; SSSE3-NEXT:    shrl $3, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    shrb %cl
+; SSSE3-NEXT:    andb $1, %cl
+; SSSE3-NEXT:    movzbl %cl, %ecx
+; SSSE3-NEXT:    movl %eax, %edx
+; SSSE3-NEXT:    andb $1, %dl
+; SSSE3-NEXT:    movzbl %dl, %edx
+; SSSE3-NEXT:    movd %edx, %xmm1
+; SSSE3-NEXT:    pinsrw $2, %ecx, %xmm1
 ; SSSE3-NEXT:    movl %eax, %ecx
-; SSSE3-NEXT:    shrl $2, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    shrl %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSSE3-NEXT:    pand {{.*}}(%rip), %xmm2
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSSE3-NEXT:    shrb $2, %cl
+; SSSE3-NEXT:    andb $1, %cl
+; SSSE3-NEXT:    movzbl %cl, %ecx
+; SSSE3-NEXT:    pinsrw $4, %ecx, %xmm1
+; SSSE3-NEXT:    shrb $3, %al
+; SSSE3-NEXT:    movzbl %al, %eax
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; SSSE3-NEXT:    psllq $63, %xmm0
 ; SSSE3-NEXT:    psrad $31, %xmm0
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; SSSE3-NEXT:    psllq $63, %xmm1
 ; SSSE3-NEXT:    psrad $31, %xmm1
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -1741,18 +1797,25 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ;
 ; SSE41-LABEL: load_sext_4i1_to_4i64:
 ; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    movl (%rdi), %eax
+; SSE41-NEXT:    movb (%rdi), %al
 ; SSE41-NEXT:    movl %eax, %ecx
-; SSE41-NEXT:    shrl %ecx
-; SSE41-NEXT:    movd %eax, %xmm1
-; SSE41-NEXT:    pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT:    shrb %cl
+; SSE41-NEXT:    andb $1, %cl
+; SSE41-NEXT:    movzbl %cl, %ecx
+; SSE41-NEXT:    movl %eax, %edx
+; SSE41-NEXT:    andb $1, %dl
+; SSE41-NEXT:    movzbl %dl, %edx
+; SSE41-NEXT:    movd %edx, %xmm1
+; SSE41-NEXT:    pinsrb $4, %ecx, %xmm1
 ; SSE41-NEXT:    movl %eax, %ecx
-; SSE41-NEXT:    shrl $2, %ecx
+; SSE41-NEXT:    shrb $2, %cl
+; SSE41-NEXT:    andb $1, %cl
+; SSE41-NEXT:    movzbl %cl, %ecx
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT:    pinsrd $2, %ecx, %xmm1
-; SSE41-NEXT:    shrl $3, %eax
-; SSE41-NEXT:    pinsrd $3, %eax, %xmm1
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pinsrb $8, %ecx, %xmm1
+; SSE41-NEXT:    shrb $3, %al
+; SSE41-NEXT:    movzbl %al, %eax
+; SSE41-NEXT:    pinsrb $12, %eax, %xmm1
 ; SSE41-NEXT:    psllq $63, %xmm0
 ; SSE41-NEXT:    psrad $31, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1764,21 +1827,26 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_4i1_to_4i64:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    movl (%rdi), %eax
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shlq $62, %rcx
-; AVX1-NEXT:    sarq $63, %rcx
-; AVX1-NEXT:    movq %rax, %rdx
-; AVX1-NEXT:    shlq $63, %rdx
-; AVX1-NEXT:    sarq $63, %rdx
+; AVX1-NEXT:    movb (%rdi), %al
+; AVX1-NEXT:    movzbl %al, %ecx
+; AVX1-NEXT:    shrb %al
+; AVX1-NEXT:    movzbl %al, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    negl %eax
+; AVX1-NEXT:    movl %ecx, %edx
+; AVX1-NEXT:    andl $1, %edx
+; AVX1-NEXT:    negl %edx
 ; AVX1-NEXT:    vmovd %edx, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shlq $61, %rcx
-; AVX1-NEXT:    sarq $63, %rcx
-; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    shlq $60, %rax
-; AVX1-NEXT:    sarq $63, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movl %ecx, %eax
+; AVX1-NEXT:    shrb $2, %al
+; AVX1-NEXT:    movzbl %al, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    negl %eax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    shrb $3, %cl
+; AVX1-NEXT:    movzbl %cl, %eax
+; AVX1-NEXT:    negl %eax
 ; AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1788,23 +1856,30 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ;
 ; AVX2-LABEL: load_sext_4i1_to_4i64:
 ; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    movl (%rdi), %eax
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $60, %rcx
+; AVX2-NEXT:    movb (%rdi), %al
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    shrb $3, %cl
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    negq %rcx
 ; AVX2-NEXT:    vmovq %rcx, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $61, %rcx
-; AVX2-NEXT:    vmovq %rcx, %xmm1
+; AVX2-NEXT:    movzbl %al, %ecx
+; AVX2-NEXT:    shrb $2, %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    negq %rax
+; AVX2-NEXT:    vmovq %rax, %xmm1
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shlq $62, %rcx
-; AVX2-NEXT:    vmovq %rcx, %xmm1
-; AVX2-NEXT:    shlq $63, %rax
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    negq %rax
+; AVX2-NEXT:    vmovq %rax, %xmm1
+; AVX2-NEXT:    shrb %cl
+; AVX2-NEXT:    movzbl %cl, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    negq %rax
 ; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_sext_4i1_to_4i64:
@@ -1817,25 +1892,29 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movzbl (%eax), %eax
+; X32-SSE2-NEXT:    movb (%eax), %al
 ; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shrl $3, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
+; X32-SSE2-NEXT:    shrb %cl
+; X32-SSE2-NEXT:    andb $1, %cl
+; X32-SSE2-NEXT:    movzbl %cl, %ecx
+; X32-SSE2-NEXT:    movl %eax, %edx
+; X32-SSE2-NEXT:    andb $1, %dl
+; X32-SSE2-NEXT:    movzbl %dl, %edx
+; X32-SSE2-NEXT:    movd %edx, %xmm1
+; X32-SSE2-NEXT:    pinsrw $2, %ecx, %xmm1
 ; X32-SSE2-NEXT:    movl %eax, %ecx
-; X32-SSE2-NEXT:    shrl $2, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movd %eax, %xmm2
-; X32-SSE2-NEXT:    shrl %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm0
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; X32-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm2
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; X32-SSE2-NEXT:    shrb $2, %cl
+; X32-SSE2-NEXT:    andb $1, %cl
+; X32-SSE2-NEXT:    movzbl %cl, %ecx
+; X32-SSE2-NEXT:    pinsrw $4, %ecx, %xmm1
+; X32-SSE2-NEXT:    shrb $3, %al
+; X32-SSE2-NEXT:    movzbl %al, %eax
+; X32-SSE2-NEXT:    pinsrw $6, %eax, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; X32-SSE2-NEXT:    psllq $63, %xmm0
 ; X32-SSE2-NEXT:    psrad $31, %xmm0
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; X32-SSE2-NEXT:    psllq $63, %xmm1
 ; X32-SSE2-NEXT:    psrad $31, %xmm1
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -1844,18 +1923,25 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE41:       # %bb.0: # %entry
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movzbl (%eax), %eax
+; X32-SSE41-NEXT:    movb (%eax), %al
 ; X32-SSE41-NEXT:    movl %eax, %ecx
-; X32-SSE41-NEXT:    shrl %ecx
-; X32-SSE41-NEXT:    movd %eax, %xmm1
-; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm1
+; X32-SSE41-NEXT:    shrb %cl
+; X32-SSE41-NEXT:    andb $1, %cl
+; X32-SSE41-NEXT:    movzbl %cl, %ecx
+; X32-SSE41-NEXT:    movl %eax, %edx
+; X32-SSE41-NEXT:    andb $1, %dl
+; X32-SSE41-NEXT:    movzbl %dl, %edx
+; X32-SSE41-NEXT:    movd %edx, %xmm1
+; X32-SSE41-NEXT:    pinsrb $4, %ecx, %xmm1
 ; X32-SSE41-NEXT:    movl %eax, %ecx
-; X32-SSE41-NEXT:    shrl $2, %ecx
+; X32-SSE41-NEXT:    shrb $2, %cl
+; X32-SSE41-NEXT:    andb $1, %cl
+; X32-SSE41-NEXT:    movzbl %cl, %ecx
 ; X32-SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; X32-SSE41-NEXT:    pinsrd $2, %ecx, %xmm1
-; X32-SSE41-NEXT:    shrl $3, %eax
-; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm1
-; X32-SSE41-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE41-NEXT:    pinsrb $8, %ecx, %xmm1
+; X32-SSE41-NEXT:    shrb $3, %al
+; X32-SSE41-NEXT:    movzbl %al, %eax
+; X32-SSE41-NEXT:    pinsrb $12, %eax, %xmm1
 ; X32-SSE41-NEXT:    psllq $63, %xmm0
 ; X32-SSE41-NEXT:    psrad $31, %xmm0
 ; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -3376,25 +3462,27 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; SSE2-LABEL: sext_4i17_to_4i32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq (%rdi), %rax
+; SSE2-NEXT:    movl %eax, %ecx
+; SSE2-NEXT:    shll $15, %ecx
+; SSE2-NEXT:    sarl $15, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm0
 ; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    shlq $30, %rcx
-; SSE2-NEXT:    sarq $47, %rcx
+; SSE2-NEXT:    shrq $17, %rcx
+; SSE2-NEXT:    shll $15, %ecx
+; SSE2-NEXT:    sarl $15, %ecx
 ; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    shlq $47, %rcx
-; SSE2-NEXT:    sarq $47, %rcx
-; SSE2-NEXT:    movd %ecx, %xmm0
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movl 8(%rdi), %ecx
 ; SSE2-NEXT:    shll $13, %ecx
 ; SSE2-NEXT:    movq %rax, %rdx
 ; SSE2-NEXT:    shrq $51, %rdx
 ; SSE2-NEXT:    orl %ecx, %edx
-; SSE2-NEXT:    shlq $47, %rdx
-; SSE2-NEXT:    sarq $47, %rdx
+; SSE2-NEXT:    shll $15, %edx
+; SSE2-NEXT:    sarl $15, %edx
 ; SSE2-NEXT:    movd %edx, %xmm1
-; SSE2-NEXT:    shlq $13, %rax
-; SSE2-NEXT:    sarq $47, %rax
+; SSE2-NEXT:    shrq $34, %rax
+; SSE2-NEXT:    shll $15, %eax
+; SSE2-NEXT:    sarl $15, %eax
 ; SSE2-NEXT:    movd %eax, %xmm2
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -3403,25 +3491,27 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; SSSE3-LABEL: sext_4i17_to_4i32:
 ; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movq (%rdi), %rax
+; SSSE3-NEXT:    movl %eax, %ecx
+; SSSE3-NEXT:    shll $15, %ecx
+; SSSE3-NEXT:    sarl $15, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm0
 ; SSSE3-NEXT:    movq %rax, %rcx
-; SSSE3-NEXT:    shlq $30, %rcx
-; SSSE3-NEXT:    sarq $47, %rcx
+; SSSE3-NEXT:    shrq $17, %rcx
+; SSSE3-NEXT:    shll $15, %ecx
+; SSSE3-NEXT:    sarl $15, %ecx
 ; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    movq %rax, %rcx
-; SSSE3-NEXT:    shlq $47, %rcx
-; SSSE3-NEXT:    sarq $47, %rcx
-; SSSE3-NEXT:    movd %ecx, %xmm0
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    movl 8(%rdi), %ecx
 ; SSSE3-NEXT:    shll $13, %ecx
 ; SSSE3-NEXT:    movq %rax, %rdx
 ; SSSE3-NEXT:    shrq $51, %rdx
 ; SSSE3-NEXT:    orl %ecx, %edx
-; SSSE3-NEXT:    shlq $47, %rdx
-; SSSE3-NEXT:    sarq $47, %rdx
+; SSSE3-NEXT:    shll $15, %edx
+; SSSE3-NEXT:    sarl $15, %edx
 ; SSSE3-NEXT:    movd %edx, %xmm1
-; SSSE3-NEXT:    shlq $13, %rax
-; SSSE3-NEXT:    sarq $47, %rax
+; SSSE3-NEXT:    shrq $34, %rax
+; SSSE3-NEXT:    shll $15, %eax
+; SSSE3-NEXT:    sarl $15, %eax
 ; SSSE3-NEXT:    movd %eax, %xmm2
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -3431,23 +3521,25 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movq (%rdi), %rax
 ; SSE41-NEXT:    movq %rax, %rcx
-; SSE41-NEXT:    shlq $30, %rcx
-; SSE41-NEXT:    sarq $47, %rcx
-; SSE41-NEXT:    movq %rax, %rdx
-; SSE41-NEXT:    shlq $47, %rdx
-; SSE41-NEXT:    sarq $47, %rdx
+; SSE41-NEXT:    shrq $17, %rcx
+; SSE41-NEXT:    shll $15, %ecx
+; SSE41-NEXT:    sarl $15, %ecx
+; SSE41-NEXT:    movl %eax, %edx
+; SSE41-NEXT:    shll $15, %edx
+; SSE41-NEXT:    sarl $15, %edx
 ; SSE41-NEXT:    movd %edx, %xmm0
 ; SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
 ; SSE41-NEXT:    movq %rax, %rcx
-; SSE41-NEXT:    shlq $13, %rcx
-; SSE41-NEXT:    sarq $47, %rcx
+; SSE41-NEXT:    shrq $34, %rcx
+; SSE41-NEXT:    shll $15, %ecx
+; SSE41-NEXT:    sarl $15, %ecx
 ; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
 ; SSE41-NEXT:    movl 8(%rdi), %ecx
 ; SSE41-NEXT:    shll $13, %ecx
 ; SSE41-NEXT:    shrq $51, %rax
 ; SSE41-NEXT:    orl %ecx, %eax
-; SSE41-NEXT:    shlq $47, %rax
-; SSE41-NEXT:    sarq $47, %rax
+; SSE41-NEXT:    shll $15, %eax
+; SSE41-NEXT:    sarl $15, %eax
 ; SSE41-NEXT:    pinsrd $3, %eax, %xmm0
 ; SSE41-NEXT:    retq
 ;
@@ -3455,23 +3547,25 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    movq (%rdi), %rax
 ; AVX-NEXT:    movq %rax, %rcx
-; AVX-NEXT:    shlq $30, %rcx
-; AVX-NEXT:    sarq $47, %rcx
-; AVX-NEXT:    movq %rax, %rdx
-; AVX-NEXT:    shlq $47, %rdx
-; AVX-NEXT:    sarq $47, %rdx
+; AVX-NEXT:    shrq $17, %rcx
+; AVX-NEXT:    shll $15, %ecx
+; AVX-NEXT:    sarl $15, %ecx
+; AVX-NEXT:    movl %eax, %edx
+; AVX-NEXT:    shll $15, %edx
+; AVX-NEXT:    sarl $15, %edx
 ; AVX-NEXT:    vmovd %edx, %xmm0
 ; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
 ; AVX-NEXT:    movq %rax, %rcx
-; AVX-NEXT:    shlq $13, %rcx
-; AVX-NEXT:    sarq $47, %rcx
+; AVX-NEXT:    shrq $34, %rcx
+; AVX-NEXT:    shll $15, %ecx
+; AVX-NEXT:    sarl $15, %ecx
 ; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
 ; AVX-NEXT:    movl 8(%rdi), %ecx
 ; AVX-NEXT:    shll $13, %ecx
 ; AVX-NEXT:    shrq $51, %rax
 ; AVX-NEXT:    orl %ecx, %eax
-; AVX-NEXT:    shlq $47, %rax
-; AVX-NEXT:    sarq $47, %rax
+; AVX-NEXT:    shll $15, %eax
+; AVX-NEXT:    sarl $15, %eax
 ; AVX-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
@@ -3483,19 +3577,22 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; X32-SSE2-NEXT:    movl 8(%eax), %eax
 ; X32-SSE2-NEXT:    shldl $13, %edx, %eax
 ; X32-SSE2-NEXT:    shll $15, %eax
+; X32-SSE2-NEXT:    sarl $15, %eax
 ; X32-SSE2-NEXT:    movd %eax, %xmm0
 ; X32-SSE2-NEXT:    movl %edx, %eax
 ; X32-SSE2-NEXT:    shll $13, %eax
+; X32-SSE2-NEXT:    sarl $15, %eax
 ; X32-SSE2-NEXT:    movd %eax, %xmm1
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; X32-SSE2-NEXT:    shldl $15, %ecx, %edx
 ; X32-SSE2-NEXT:    shll $15, %ecx
+; X32-SSE2-NEXT:    sarl $15, %ecx
 ; X32-SSE2-NEXT:    movd %ecx, %xmm0
 ; X32-SSE2-NEXT:    shll $15, %edx
+; X32-SSE2-NEXT:    sarl $15, %edx
 ; X32-SSE2-NEXT:    movd %edx, %xmm2
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    psrad $15, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: sext_4i17_to_4i32:
@@ -3511,14 +3608,17 @@ define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; X32-SSE41-NEXT:    shldl $13, %edx, %eax
 ; X32-SSE41-NEXT:    shldl $15, %ecx, %edx
 ; X32-SSE41-NEXT:    shll $15, %edx
+; X32-SSE41-NEXT:    sarl $15, %edx
 ; X32-SSE41-NEXT:    shll $15, %ecx
+; X32-SSE41-NEXT:    sarl $15, %ecx
 ; X32-SSE41-NEXT:    movd %ecx, %xmm0
 ; X32-SSE41-NEXT:    pinsrd $1, %edx, %xmm0
 ; X32-SSE41-NEXT:    shll $13, %esi
+; X32-SSE41-NEXT:    sarl $15, %esi
 ; X32-SSE41-NEXT:    pinsrd $2, %esi, %xmm0
 ; X32-SSE41-NEXT:    shll $15, %eax
+; X32-SSE41-NEXT:    sarl $15, %eax
 ; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT:    psrad $15, %xmm0
 ; X32-SSE41-NEXT:    popl %esi
 ; X32-SSE41-NEXT:    .cfi_def_cfa_offset 4
 ; X32-SSE41-NEXT:    retl

