[llvm] r239509 - [X86][SSE] Vectorized i8 and i16 shift operators

Simon Pilgrim llvm-dev at redking.me.uk
Thu Jun 11 00:46:37 PDT 2015


Author: rksimon
Date: Thu Jun 11 02:46:37 2015
New Revision: 239509

URL: http://llvm.org/viewvc/llvm-project?rev=239509&view=rev
Log:
[X86][SSE] Vectorized i8 and i16 shift operators

This patch ensures that SHL/SRL/SRA shifts for i8 and i16 vectors avoid scalarization. It builds on the existing vectorized i8 SHL implementation, which moves the shift-amount bits up to the sign-bit position and applies the 4-, 2- and 1-bit shifts separately, with several improvements:

1 - SSE41 targets can use (v)pblendvb directly on the sign bit instead of performing a comparison to feed into a VSELECT node (a sketch of this follows the list).
2 - pre-SSE41 targets were masking + comparing with a 0x80 constant; we avoid this by using the fact that a set sign bit means a negative integer, which can be compared against zero and then fed into VSELECT, avoiding the need for a constant mask (zero generation is much cheaper).
3 - SRA i8 needs each byte unpacked to the upper byte of an i16 so that the i16 psraw instruction can be used for correct sign extension; we have to do more work than for SHL/SRL, but perf tests indicate that this is still beneficial.
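
To make the approach concrete, here is a minimal intrinsics sketch of the equivalent SSE41 sequence for the v16i8 SHL case, together with the pre-SSE41 sign-bit select described in improvement 2. This is illustrative only (the function names are mine, not code from the patch) and assumes <smmintrin.h> plus in-range shift amounts:

#include <smmintrin.h>  // SSE4.1: _mm_blendv_epi8 (pulls in the SSE2 headers)

// Pre-SSE41 stand-in for (v)pblendvb: a byte with its sign bit set is
// negative, so comparing 0 > sel yields an all-ones mask for exactly those
// lanes, feeding the usual OR(AND(v0,C),ANDNOT(C,v1)) select.
static __m128i select_by_sign_sse2(__m128i sel, __m128i v0, __m128i v1) {
  __m128i c = _mm_cmpgt_epi8(_mm_setzero_si128(), sel);
  return _mm_or_si128(_mm_and_si128(c, v0), _mm_andnot_si128(c, v1));
}

// v16i8 SHL: move the 3 relevant amount bits up to the sign-bit position,
// then blend in the 4-, 2- and 1-bit shifts as each bit reaches bit 7.
// psllw shifts 16-bit lanes, so the cross-byte spill is masked off.
static __m128i shl_v16i8_sse41(__m128i r, __m128i amt) {
  amt = _mm_slli_epi16(amt, 5);                       // amount bit 2 -> bit 7

  __m128i m = _mm_and_si128(_mm_slli_epi16(r, 4), _mm_set1_epi8((char)0xF0));
  r = _mm_blendv_epi8(r, m, amt);                     // shift by 4 if set
  amt = _mm_add_epi8(amt, amt);                       // next amount bit -> bit 7

  m = _mm_and_si128(_mm_slli_epi16(r, 2), _mm_set1_epi8((char)0xFC));
  r = _mm_blendv_epi8(r, m, amt);                     // shift by 2 if set
  amt = _mm_add_epi8(amt, amt);

  m = _mm_add_epi8(r, r);                             // shift by 1 == r + r
  return _mm_blendv_epi8(r, m, amt);
}

The pre-SSE41 lowering follows the same structure with select_by_sign_sse2 substituted for the _mm_blendv_epi8 calls, and the SRL path is the same shape using psrlw with the complementary masks (0x0f/0x3f/0x7f), including one on the final 1-bit step.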

The i16 implementation is similar to, but simpler than, the i8 one: we have to perform 8-, 4-, 2- and 1-bit shifts, but less shift masking is involved. However, SSE41's use of (v)pblendvb requires the i16 shift amount to be splatted to both bytes of each element (sketched below).
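
Under the same caveats (illustrative name, <smmintrin.h>, in-range shift amounts), the SSE41 i16 SHL path is roughly:

// v8i16 SHL: replicate amount bit 3 into the sign bit of both bytes of each
// word so that pblendvb, which tests per-byte sign bits, drives a uniform
// 16-bit select; the paddw doublings then walk bits 2, 1 and 0 into place.
static __m128i shl_v8i16_sse41(__m128i r, __m128i amt) {
  amt = _mm_or_si128(_mm_slli_epi16(amt, 4), _mm_slli_epi16(amt, 12));

  __m128i m = _mm_slli_epi16(r, 8);
  r = _mm_blendv_epi8(r, m, amt);                     // shift by 8 if set
  amt = _mm_add_epi16(amt, amt);

  m = _mm_slli_epi16(r, 4);
  r = _mm_blendv_epi8(r, m, amt);                     // shift by 4 if set
  amt = _mm_add_epi16(amt, amt);

  m = _mm_slli_epi16(r, 2);
  r = _mm_blendv_epi8(r, m, amt);                     // shift by 2 if set
  amt = _mm_add_epi16(amt, amt);

  m = _mm_slli_epi16(r, 1);
  return _mm_blendv_epi8(r, m, amt);                  // shift by 1 if set
}

On pre-SSE41 targets the amount preparation is just a single shift by 12, and each select splats the sign bit with psraw $15 instead of using pblendvb.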

Tested on SSE2, SSE41 and AVX machines.

Differential Revision: http://reviews.llvm.org/D9474

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
    llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll
    llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll
    llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll
    llvm/trunk/test/CodeGen/X86/2011-12-15-vec_shift.ll
    llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll
    llvm/trunk/test/CodeGen/X86/vec_shift8.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Jun 11 02:46:37 2015
@@ -17012,36 +17012,111 @@ static SDValue LowerShift(SDValue Op, co
     }
   }
 
-  if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
-    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
-    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT));
-
-    SDValue VSelM = DAG.getConstant(0x80, dl, VT);
-    SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
-    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
-
-    // r = VSELECT(r, shl(r, 4), a);
-    SDValue M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(4, dl, VT));
-    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
+  if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) {
+    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
+    unsigned ShiftOpcode = Op->getOpcode();
+
+    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
+      // On SSE41 targets we make use of the fact that VSELECT lowers
+      // to PBLENDVB which selects bytes based just on the sign bit.
+      if (Subtarget->hasSSE41()) {
+        V0 = DAG.getBitcast(VT, V0);
+        V1 = DAG.getBitcast(VT, V1);
+        Sel = DAG.getBitcast(VT, Sel);
+        return DAG.getBitcast(SelVT,
+                              DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1));
+      }
+      // On pre-SSE41 targets we test for the sign bit by comparing to
+      // zero - a negative value will set all bits of the lanes to true
+      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
+      SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
+      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
+      return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1);
+    };
 
-    // a += a
-    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
-    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
-
-    // r = VSELECT(r, shl(r, 2), a);
-    M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(2, dl, VT));
-    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
+    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
+    // We can safely do this using i16 shifts as we're only interested in
+    // the 3 lower bits of each byte.
+    Amt = DAG.getBitcast(ExtVT, Amt);
+    Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
+    Amt = DAG.getBitcast(VT, Amt);
+
+    if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
+      // r = VSELECT(r, shift(r, 4), a);
+      SDValue M =
+          DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
+      R = SignBitSelect(VT, Amt, M, R);
+
+      // a += a
+      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+      // r = VSELECT(r, shift(r, 2), a);
+      M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
+      R = SignBitSelect(VT, Amt, M, R);
+
+      // a += a
+      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+      // return VSELECT(r, shift(r, 1), a);
+      M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
+      R = SignBitSelect(VT, Amt, M, R);
+      return R;
+    }
 
-    // a += a
-    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
-    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
-
-    // return VSELECT(r, r+r, a);
-    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
-                    DAG.getNode(ISD::ADD, dl, VT, R, R), R);
-    return R;
+    if (Op->getOpcode() == ISD::SRA) {
+      // For SRA we need to unpack each byte to the higher byte of a i16 vector
+      // so we can correctly sign extend. We don't care what happens to the
+      // lower byte.
+      SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
+      SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
+      SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
+      SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
+      ALo = DAG.getBitcast(ExtVT, ALo);
+      AHi = DAG.getBitcast(ExtVT, AHi);
+      RLo = DAG.getBitcast(ExtVT, RLo);
+      RHi = DAG.getBitcast(ExtVT, RHi);
+
+      // r = VSELECT(r, shift(r, 4), a);
+      SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+                                DAG.getConstant(4, dl, ExtVT));
+      SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+                                DAG.getConstant(4, dl, ExtVT));
+      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+      // a += a
+      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+
+      // r = VSELECT(r, shift(r, 2), a);
+      MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+                        DAG.getConstant(2, dl, ExtVT));
+      MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+                        DAG.getConstant(2, dl, ExtVT));
+      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+      // a += a
+      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+
+      // r = VSELECT(r, shift(r, 1), a);
+      MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+                        DAG.getConstant(1, dl, ExtVT));
+      MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+                        DAG.getConstant(1, dl, ExtVT));
+      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+      // Logical shift the result back to the lower byte, leaving a zero upper
+      // byte
+      // meaning that we can safely pack with PACKUSWB.
+      RLo =
+          DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
+      RHi =
+          DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
+      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
+    }
   }
 
   // It's worth extending once and using the v8i32 shifts for 16-bit types, but
@@ -17075,6 +17150,67 @@ static SDValue LowerShift(SDValue Op, co
     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
   }
 
+  if (VT == MVT::v8i16) {
+    unsigned ShiftOpcode = Op->getOpcode();
+
+    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
+      // On SSE41 targets we make use of the fact that VSELECT lowers
+      // to PBLENDVB which selects bytes based just on the sign bit.
+      if (Subtarget->hasSSE41()) {
+        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
+        V0 = DAG.getBitcast(ExtVT, V0);
+        V1 = DAG.getBitcast(ExtVT, V1);
+        Sel = DAG.getBitcast(ExtVT, Sel);
+        return DAG.getBitcast(
+            VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1));
+      }
+      // On pre-SSE41 targets we splat the sign bit - a negative value will
+      // set all bits of the lanes to true and VSELECT uses that in
+      // its OR(AND(V0,C),AND(V1,~C)) lowering.
+      SDValue C =
+          DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
+      return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1);
+    };
+
+    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
+    if (Subtarget->hasSSE41()) {
+      // On SSE41 targets we need to replicate the shift mask in both
+      // bytes for PBLENDVB.
+      Amt = DAG.getNode(
+          ISD::OR, dl, VT,
+          DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
+          DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
+    } else {
+      Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
+    }
+
+    // r = VSELECT(r, shift(r, 8), a);
+    SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
+    R = SignBitSelect(Amt, M, R);
+
+    // a += a
+    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+    // r = VSELECT(r, shift(r, 4), a);
+    M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
+    R = SignBitSelect(Amt, M, R);
+
+    // a += a
+    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+    // r = VSELECT(r, shift(r, 2), a);
+    M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
+    R = SignBitSelect(Amt, M, R);
+
+    // a += a
+    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+    // return VSELECT(r, shift(r, 1), a);
+    M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
+    R = SignBitSelect(Amt, M, R);
+    return R;
+  }
+
   // Decompose 256-bit shifts into smaller 128-bit shifts.
   if (VT.is256BitVector()) {
     unsigned NumElems = VT.getVectorNumElements();

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Thu Jun 11 02:46:37 2015
@@ -153,13 +153,13 @@ unsigned X86TTIImpl::getArithmeticInstrC
     { ISD::SHL,     MVT::v4i64,    1 },
     { ISD::SRL,     MVT::v4i64,    1 },
 
-    { ISD::SHL,  MVT::v32i8,      42 }, // cmpeqb sequence.
+    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
     { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
 
-    { ISD::SRL,  MVT::v32i8,   32*10 }, // Scalarized.
+    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
     { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
 
-    { ISD::SRA,  MVT::v32i8,   32*10 }, // Scalarized.
+    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
     { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
     { ISD::SRA,  MVT::v4i64,    4*10 }, // Scalarized.
 
@@ -253,19 +253,19 @@ unsigned X86TTIImpl::getArithmeticInstrC
     // to ISel. The cost model must return worst case assumptions because it is
     // used for vectorization and we don't want to make vectorized code worse
     // than scalar code.
-    { ISD::SHL,  MVT::v16i8,  30 }, // cmpeqb sequence.
-    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
-    { ISD::SHL,  MVT::v4i32,  2*5 }, // We optimized this using mul.
+    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
+    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
+    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
     { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
     { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.
 
-    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
-    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
+    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
+    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
     { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
     { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.
 
-    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
-    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
+    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
+    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
     { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
     { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.
 

Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll Thu Jun 11 02:46:37 2015
@@ -29,9 +29,9 @@ entry:
 define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
 entry:
   ; SSE2: shift8i16
-  ; SSE2: cost of 80 {{.*}} ashr
+  ; SSE2: cost of 32 {{.*}} ashr
   ; SSE2-CODEGEN: shift8i16
-  ; SSE2-CODEGEN: sarw %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype8i16 %a , %b
   ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
 define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
 entry:
   ; SSE2: shift16i16
-  ; SSE2: cost of 160 {{.*}} ashr
+  ; SSE2: cost of 64 {{.*}} ashr
   ; SSE2-CODEGEN: shift16i16
-  ; SSE2-CODEGEN: sarw %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype16i16 %a , %b
   ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
 define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
 entry:
   ; SSE2: shift32i16
-  ; SSE2: cost of 320 {{.*}} ashr
+  ; SSE2: cost of 128 {{.*}} ashr
   ; SSE2-CODEGEN: shift32i16
-  ; SSE2-CODEGEN: sarw %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype32i16 %a , %b
   ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
 define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
 entry:
   ; SSE2: shift8i8
-  ; SSE2: cost of 80 {{.*}} ashr
+  ; SSE2: cost of 32 {{.*}} ashr
   ; SSE2-CODEGEN: shift8i8
-  ; SSE2-CODEGEN: sarw %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype8i8 %a , %b
   ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
 define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
 entry:
   ; SSE2: shift16i8
-  ; SSE2: cost of 160 {{.*}} ashr
+  ; SSE2: cost of 54 {{.*}} ashr
   ; SSE2-CODEGEN: shift16i8
-  ; SSE2-CODEGEN: sarb %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype16i8 %a , %b
   ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
 define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
 entry:
   ; SSE2: shift32i8
-  ; SSE2: cost of 320 {{.*}} ashr
+  ; SSE2: cost of 108 {{.*}} ashr
   ; SSE2-CODEGEN: shift32i8
-  ; SSE2-CODEGEN: sarb %cl
+  ; SSE2-CODEGEN: psraw
 
   %0 = ashr %shifttype32i8 %a , %b
   ret %shifttype32i8 %0

Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll Thu Jun 11 02:46:37 2015
@@ -29,9 +29,9 @@ entry:
 define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
 entry:
   ; SSE2: shift8i16
-  ; SSE2: cost of 80 {{.*}} lshr
+  ; SSE2: cost of 32 {{.*}} lshr
   ; SSE2-CODEGEN: shift8i16
-  ; SSE2-CODEGEN: shrl %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype8i16 %a , %b
   ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
 define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
 entry:
   ; SSE2: shift16i16
-  ; SSE2: cost of 160 {{.*}} lshr
+  ; SSE2: cost of 64 {{.*}} lshr
   ; SSE2-CODEGEN: shift16i16
-  ; SSE2-CODEGEN: shrl %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype16i16 %a , %b
   ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
 define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
 entry:
   ; SSE2: shift32i16
-  ; SSE2: cost of 320 {{.*}} lshr
+  ; SSE2: cost of 128 {{.*}} lshr
   ; SSE2-CODEGEN: shift32i16
-  ; SSE2-CODEGEN: shrl %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype32i16 %a , %b
   ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
 define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
 entry:
   ; SSE2: shift8i8
-  ; SSE2: cost of 80 {{.*}} lshr
+  ; SSE2: cost of 32 {{.*}} lshr
   ; SSE2-CODEGEN: shift8i8
-  ; SSE2-CODEGEN: shrl %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype8i8 %a , %b
   ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
 define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
 entry:
   ; SSE2: shift16i8
-  ; SSE2: cost of 160 {{.*}} lshr
+  ; SSE2: cost of 26 {{.*}} lshr
   ; SSE2-CODEGEN: shift16i8
-  ; SSE2-CODEGEN: shrb %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype16i8 %a , %b
   ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
 define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
 entry:
   ; SSE2: shift32i8
-  ; SSE2: cost of 320 {{.*}} lshr
+  ; SSE2: cost of 52 {{.*}} lshr
   ; SSE2-CODEGEN: shift32i8
-  ; SSE2-CODEGEN: shrb %cl
+  ; SSE2-CODEGEN: psrlw
 
   %0 = lshr %shifttype32i8 %a , %b
   ret %shifttype32i8 %0

Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll Thu Jun 11 02:46:37 2015
@@ -29,9 +29,9 @@ entry:
 define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
 entry:
   ; SSE2: shift8i16
-  ; SSE2: cost of 80 {{.*}} shl
+  ; SSE2: cost of 32 {{.*}} shl
   ; SSE2-CODEGEN: shift8i16
-  ; SSE2-CODEGEN: shll %cl
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype8i16 %a , %b
   ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
 define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
 entry:
   ; SSE2: shift16i16
-  ; SSE2: cost of 160 {{.*}} shl
+  ; SSE2: cost of 64 {{.*}} shl
   ; SSE2-CODEGEN: shift16i16
-  ; SSE2-CODEGEN: shll %cl
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype16i16 %a , %b
   ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
 define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
 entry:
   ; SSE2: shift32i16
-  ; SSE2: cost of 320 {{.*}} shl
+  ; SSE2: cost of 128 {{.*}} shl
   ; SSE2-CODEGEN: shift32i16
-  ; SSE2-CODEGEN: shll %cl
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype32i16 %a , %b
   ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
 define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
 entry:
   ; SSE2: shift8i8
-  ; SSE2: cost of 80 {{.*}} shl
+  ; SSE2: cost of 32 {{.*}} shl
   ; SSE2-CODEGEN: shift8i8
-  ; SSE2-CODEGEN: shll
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype8i8 %a , %b
   ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
 define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
 entry:
   ; SSE2: shift16i8
-  ; SSE2: cost of 30 {{.*}} shl
+  ; SSE2: cost of 26 {{.*}} shl
   ; SSE2-CODEGEN: shift16i8
-  ; SSE2-CODEGEN: cmpeqb
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype16i8 %a , %b
   ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
 define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
 entry:
   ; SSE2: shift32i8
-  ; SSE2: cost of 60 {{.*}} shl
+  ; SSE2: cost of 52 {{.*}} shl
   ; SSE2-CODEGEN: shift32i8
-  ; SSE2-CODEGEN: cmpeqb
+  ; SSE2-CODEGEN: psllw
 
   %0 = shl %shifttype32i8 %a , %b
   ret %shifttype32i8 %0

Modified: llvm/trunk/test/CodeGen/X86/2011-12-15-vec_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-12-15-vec_shift.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-12-15-vec_shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-12-15-vec_shift.ll Thu Jun 11 02:46:37 2015
@@ -12,8 +12,8 @@ define <16 x i8> @shift(<16 x i8> %a, <1
 
   ; Make sure we're masking and pcmp'ing the VSELECT conditon vector.
   ; CHECK-WO-SSE4: psllw $5, [[REG1:%xmm.]]
-  ; CHECK-WO-SSE4: pand [[REG1]], [[REG2:%xmm.]]
-  ; CHECK-WO-SSE4: pcmpeqb {{%xmm., }}[[REG2]]
+  ; CHECK-WO-SSE4: pxor [[REG2:%xmm.]], [[REG2:%xmm.]]
+  ; CHECK-WO-SSE4: pcmpgtb {{%xmm., }}[[REG2]]
   %1 = shl <16 x i8> %a, %b
   ret <16 x i8> %1
 }

Modified: llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll Thu Jun 11 02:46:37 2015
@@ -302,49 +302,17 @@ define <16 x i16> @shl_16i16(<16 x i16>
 
 define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
 ; CHECK-LABEL:  shl_32i8
-; CHECK:        vextracti128 $1, %ymm0, %xmm3
-; CHECK-NEXT:   vpsllw $4, %xmm3, %xmm2
-; CHECK-NEXT:   vmovdqa  {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; CHECK-NEXT:   vpand %xmm8, %xmm2, %xmm5
-; CHECK-NEXT:   vextracti128 $1, %ymm1, %xmm2
-; CHECK-NEXT:   vpsllw $5, %xmm2, %xmm2
-; CHECK-NEXT:   vmovdqa  {{.*#+}} xmm9 = [224,224,224,224,224,224,224,224,224,224,224,224,224,224,224,224]
-; CHECK-NEXT:   vpand %xmm9, %xmm2, %xmm7
-; CHECK-NEXT:   vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; CHECK-NEXT:   vpand %xmm7, %xmm2, %xmm4
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm4, %xmm4
-; CHECK-NEXT:   vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
-; CHECK-NEXT:   vpsllw $2, %xmm3, %xmm4
-; CHECK-NEXT:   vmovdqa  {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; CHECK-NEXT:   vpand %xmm5, %xmm4, %xmm4
-; CHECK-NEXT:   vpaddb %xmm7, %xmm7, %xmm7
-; CHECK-NEXT:   vpand %xmm7, %xmm2, %xmm6
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm6, %xmm6
-; CHECK-NEXT:   vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:   vpaddb %xmm3, %xmm3, %xmm4
-; CHECK-NEXT:   vpaddb %xmm7, %xmm7, %xmm6
-; CHECK-NEXT:   vpand %xmm6, %xmm2, %xmm6
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm6, %xmm6
-; CHECK-NEXT:   vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:   vpsllw $4, %xmm0, %xmm4
-; CHECK-NEXT:   vpand %xmm8, %xmm4, %xmm4
-; CHECK-NEXT:   vpsllw $5, %xmm1, %xmm1
-; CHECK-NEXT:   vpand %xmm9, %xmm1, %xmm1
-; CHECK-NEXT:   vpand %xmm1, %xmm2, %xmm6
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm6, %xmm6
-; CHECK-NEXT:   vpblendvb %xmm6, %xmm4, %xmm0, %xmm0
-; CHECK-NEXT:   vpsllw $2, %xmm0, %xmm4
-; CHECK-NEXT:   vpand %xmm5, %xmm4, %xmm4
-; CHECK-NEXT:   vpaddb %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:   vpand %xmm1, %xmm2, %xmm5
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm5, %xmm5
-; CHECK-NEXT:   vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
-; CHECK-NEXT:   vpaddb %xmm0, %xmm0, %xmm4
-; CHECK-NEXT:   vpaddb %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:   vpand %xmm1, %xmm2, %xmm1
-; CHECK-NEXT:   vpcmpeqb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:   vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; CHECK-NEXT:   vinserti128 $1, %xmm3, %ymm0, %ymm0
+; CHECK:        vpsllw    $5, %ymm1, %ymm1
+; CHECK-NEXT:   vpsllw    $4, %ymm0, %ymm2
+; CHECK-NEXT:   vpand     {{.*}}(%rip), %ymm2, %ymm2
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:   vpsllw    $2, %ymm0, %ymm2
+; CHECK-NEXT:   vpand     {{.*}}(%rip), %ymm2, %ymm2
+; CHECK-NEXT:   vpaddb    %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:   vpaddb    %ymm0, %ymm0, %ymm2
+; CHECK-NEXT:   vpaddb    %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:   retq
   %shl = shl <32 x i8> %r, %a
   ret <32 x i8> %shl
@@ -381,169 +349,30 @@ define <16 x i16> @ashr_16i16(<16 x i16>
 
 define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
 ; CHECK-LABEL:  ashr_32i8
-; CHECK:        vextracti128 $1, %ymm1, %xmm2
-; CHECK-NEXT:   vpextrb $1, %xmm2, %ecx
-; CHECK-NEXT:   vextracti128 $1, %ymm0, %xmm3
-; CHECK-NEXT:   vpextrb $1, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $0, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $0, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   movzbl %dl, %edx
-; CHECK-NEXT:   vpextrb $2, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $2, %xmm3, %esi
-; CHECK-NEXT:   sarb %cl, %sil
-; CHECK-NEXT:   vmovd %edx, %xmm4
-; CHECK-NEXT:   vpinsrb $1, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %sil, %eax
-; CHECK-NEXT:   vpextrb $3, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $3, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $2, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $3, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $4, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $4, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $4, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $5, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $5, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $6, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $6, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $5, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $7, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $7, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $6, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $7, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $8, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $8, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $8, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $9, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $9, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $10, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $10, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $9, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $11, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $11, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $10, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $11, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $12, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $12, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $12, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $13, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $13, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $14, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $14, %xmm3, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $13, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $15, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $15, %xmm3, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $1, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $1, %xmm0, %esi
-; CHECK-NEXT:   sarb %cl, %sil
-; CHECK-NEXT:   movzbl %dl, %ecx
-; CHECK-NEXT:   vpinsrb $14, %ecx, %xmm4, %xmm2
-; CHECK-NEXT:   vpextrb $0, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $0, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpextrb $2, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $2, %xmm0, %edi
-; CHECK-NEXT:   sarb %cl, %dil
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $15, %eax, %xmm2, %xmm2
-; CHECK-NEXT:   movzbl %sil, %eax
-; CHECK-NEXT:   movzbl %dl, %ecx
-; CHECK-NEXT:   vmovd %ecx, %xmm3
-; CHECK-NEXT:   vpinsrb $1, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dil, %eax
-; CHECK-NEXT:   vpextrb $3, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $3, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $2, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $3, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $4, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $4, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $4, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $5, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $5, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $6, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $6, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $5, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $7, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $7, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $6, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $7, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $8, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $8, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $8, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $9, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $9, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $10, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $10, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $9, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $11, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $11, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $10, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $11, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $12, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $12, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $12, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $13, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $13, %xmm0, %eax
-; CHECK-NEXT:   sarb %cl, %al
-; CHECK-NEXT:   vpextrb $14, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $14, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $13, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $15, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $15, %xmm0, %edx
-; CHECK-NEXT:   sarb %cl, %dl
-; CHECK-NEXT:   vpinsrb $14, %eax, %xmm3, %xmm0
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $15, %eax, %xmm0, %xmm0
-; CHECK-NEXT:   vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK:        vpsllw     $5, %ymm1, %ymm1
+; CHECK-NEXT:   vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; CHECK-NEXT:   vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; CHECK-NEXT:   vpsraw     $4, %ymm3, %ymm4
+; CHECK-NEXT:   vpblendvb  %ymm2, %ymm4, %ymm3, %ymm3
+; CHECK-NEXT:   vpsraw     $2, %ymm3, %ymm4
+; CHECK-NEXT:   vpaddw     %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:   vpblendvb  %ymm2, %ymm4, %ymm3, %ymm3
+; CHECK-NEXT:   vpsraw     $1, %ymm3, %ymm4
+; CHECK-NEXT:   vpaddw     %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:   vpblendvb  %ymm2, %ymm4, %ymm3, %ymm2
+; CHECK-NEXT:   vpsrlw     $8, %ymm2, %ymm2
+; CHECK-NEXT:   vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; CHECK-NEXT:   vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; CHECK-NEXT:   vpsraw     $4, %ymm0, %ymm3
+; CHECK-NEXT:   vpblendvb  %ymm1, %ymm3, %ymm0, %ymm0
+; CHECK-NEXT:   vpsraw     $2, %ymm0, %ymm3
+; CHECK-NEXT:   vpaddw     %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb  %ymm1, %ymm3, %ymm0, %ymm0
+; CHECK-NEXT:   vpsraw     $1, %ymm0, %ymm3
+; CHECK-NEXT:   vpaddw     %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb  %ymm1, %ymm3, %ymm0, %ymm0
+; CHECK-NEXT:   vpsrlw     $8, %ymm0, %ymm0
+; CHECK-NEXT:   vpackuswb  %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:   retq
   %ashr = ashr <32 x i8> %r, %a
   ret <32 x i8> %ashr
@@ -580,169 +409,18 @@ define <16 x i16> @lshr_16i16(<16 x i16>
 
 define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
 ; CHECK-LABEL:  lshr_32i8
-; CHECK:        vextracti128 $1, %ymm1, %xmm2
-; CHECK-NEXT:   vpextrb $1, %xmm2, %ecx
-; CHECK-NEXT:   vextracti128 $1, %ymm0, %xmm3
-; CHECK-NEXT:   vpextrb $1, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $0, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $0, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   movzbl %dl, %edx
-; CHECK-NEXT:   vpextrb $2, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $2, %xmm3, %esi
-; CHECK-NEXT:   shrb %cl, %sil
-; CHECK-NEXT:   vmovd %edx, %xmm4
-; CHECK-NEXT:   vpinsrb $1, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %sil, %eax
-; CHECK-NEXT:   vpextrb $3, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $3, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $2, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $3, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $4, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $4, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $4, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $5, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $5, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $6, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $6, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $5, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $7, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $7, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $6, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $7, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $8, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $8, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $8, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $9, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $9, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $10, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $10, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $9, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $11, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $11, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $10, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $11, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $12, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $12, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $12, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $13, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $13, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $14, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $14, %xmm3, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $13, %eax, %xmm4, %xmm4
-; CHECK-NEXT:   vpextrb $15, %xmm2, %ecx
-; CHECK-NEXT:   vpextrb $15, %xmm3, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $1, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $1, %xmm0, %esi
-; CHECK-NEXT:   shrb %cl, %sil
-; CHECK-NEXT:   movzbl %dl, %ecx
-; CHECK-NEXT:   vpinsrb $14, %ecx, %xmm4, %xmm2
-; CHECK-NEXT:   vpextrb $0, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $0, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpextrb $2, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $2, %xmm0, %edi
-; CHECK-NEXT:   shrb %cl, %dil
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $15, %eax, %xmm2, %xmm2
-; CHECK-NEXT:   movzbl %sil, %eax
-; CHECK-NEXT:   movzbl %dl, %ecx
-; CHECK-NEXT:   vmovd %ecx, %xmm3
-; CHECK-NEXT:   vpinsrb $1, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dil, %eax
-; CHECK-NEXT:   vpextrb $3, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $3, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $2, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $3, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $4, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $4, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $4, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $5, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $5, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $6, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $6, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $5, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $7, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $7, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $6, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $7, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $8, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $8, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $8, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $9, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $9, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $10, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $10, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $9, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $11, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $11, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $10, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $11, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $12, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $12, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $12, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   vpextrb $13, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $13, %xmm0, %eax
-; CHECK-NEXT:   shrb %cl, %al
-; CHECK-NEXT:   vpextrb $14, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $14, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   movzbl %al, %eax
-; CHECK-NEXT:   vpinsrb $13, %eax, %xmm3, %xmm3
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpextrb $15, %xmm1, %ecx
-; CHECK-NEXT:   vpextrb $15, %xmm0, %edx
-; CHECK-NEXT:   shrb %cl, %dl
-; CHECK-NEXT:   vpinsrb $14, %eax, %xmm3, %xmm0
-; CHECK-NEXT:   movzbl %dl, %eax
-; CHECK-NEXT:   vpinsrb $15, %eax, %xmm0, %xmm0
-; CHECK-NEXT:   vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK:        vpsllw    $5, %ymm1, %ymm1
+; CHECK-NEXT:   vpsrlw    $4, %ymm0, %ymm2
+; CHECK-NEXT:   vpand     {{.*}}(%rip), %ymm2, %ymm2
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:   vpsrlw    $2, %ymm0, %ymm2
+; CHECK-NEXT:   vpand     {{.*}}(%rip), %ymm2, %ymm2
+; CHECK-NEXT:   vpaddb    %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:   vpsrlw    $1, %ymm0, %ymm2
+; CHECK-NEXT:   vpand     {{.*}}(%rip), %ymm2, %ymm2
+; CHECK-NEXT:   vpaddb    %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:   vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:   retq
   %lshr = lshr <32 x i8> %r, %a
   ret <32 x i8> %lshr

Modified: llvm/trunk/test/CodeGen/X86/vec_shift8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift8.ll?rev=239509&r1=239508&r2=239509&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift8.ll Thu Jun 11 02:46:37 2015
@@ -8,114 +8,83 @@
 
 define <2 x i64> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
 entry:
-; SSE2:       pextrw $7, %xmm0, %eax
-; SSE2-NEXT:  pextrw $7, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  pextrw $3, %xmm0, %eax
-; SSE2-NEXT:  pextrw $3, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT:  pextrw $5, %xmm0, %eax
-; SSE2-NEXT:  pextrw $5, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  pextrw $1, %xmm0, %eax
-; SSE2-NEXT:  pextrw $1, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT:  pextrw $6, %xmm0, %eax
-; SSE2-NEXT:  pextrw $6, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  pextrw $2, %xmm0, %eax
-; SSE2-NEXT:  pextrw $2, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:  pextrw $4, %xmm0, %eax
-; SSE2-NEXT:  pextrw $4, %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  movd %xmm0, %eax
-; SSE2-NEXT:  movd %xmm1, %ecx
-; SSE2-NEXT:  shll %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; ALL-NOT: shll
+;
+; SSE2:       psllw   $12, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psllw   $8, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psllw   $4, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psllw   $2, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  psraw   $15, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  pandn   %xmm0, %xmm2
+; SSE2-NEXT:  psllw   $1, %xmm0
+; SSE2-NEXT:  pand    %xmm1, %xmm0
+; SSE2-NEXT:  por     %xmm2, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      pextrw $1, %xmm0, %eax
-; SSE41-NEXT: pextrw $1, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: movd %xmm0, %edx
-; SSE41-NEXT: movd %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %edx
-; SSE41-NEXT: movd %edx, %xmm2
-; SSE41-NEXT: pinsrw $1, %eax, %xmm2
-; SSE41-NEXT: pextrw $2, %xmm0, %eax
-; SSE41-NEXT: pextrw $2, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $2, %eax, %xmm2
-; SSE41-NEXT: pextrw $3, %xmm0, %eax
-; SSE41-NEXT: pextrw $3, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $3, %eax, %xmm2
-; SSE41-NEXT: pextrw $4, %xmm0, %eax
-; SSE41-NEXT: pextrw $4, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $4, %eax, %xmm2
-; SSE41-NEXT: pextrw $5, %xmm0, %eax
-; SSE41-NEXT: pextrw $5, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $5, %eax, %xmm2
-; SSE41-NEXT: pextrw $6, %xmm0, %eax
-; SSE41-NEXT: pextrw $6, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $6, %eax, %xmm2
-; SSE41-NEXT: pextrw $7, %xmm0, %eax
-; SSE41-NEXT: pextrw $7, %xmm1, %ecx
-; SSE41-NEXT: shll %cl, %eax
-; SSE41-NEXT: pinsrw $7, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa   %xmm0, %xmm2
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: psllw    $12, %xmm0
+; SSE41-NEXT: psllw    $4, %xmm1
+; SSE41-NEXT: por      %xmm0, %xmm1
+; SSE41-NEXT: movdqa   %xmm1, %xmm3
+; SSE41-NEXT: paddw    %xmm3, %xmm3
+; SSE41-NEXT: movdqa   %xmm2, %xmm4
+; SSE41-NEXT: psllw    $8, %xmm4
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm1
+; SSE41-NEXT: psllw    $4, %xmm1
+; SSE41-NEXT: movdqa   %xmm3, %xmm0
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm1
+; SSE41-NEXT: psllw    $2, %xmm1
+; SSE41-NEXT: paddw    %xmm3, %xmm3
+; SSE41-NEXT: movdqa   %xmm3, %xmm0
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm1
+; SSE41-NEXT: psllw    $1, %xmm1
+; SSE41-NEXT: paddw    %xmm3, %xmm3
+; SSE41-NEXT: movdqa   %xmm3, %xmm0
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpextrw $1, %xmm0, %eax
-; AVX-NEXT:   vpextrw $1, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vmovd %xmm0, %edx
-; AVX-NEXT:   vmovd %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %edx
-; AVX-NEXT:   vmovd %edx, %xmm2
-; AVX-NEXT:   vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $2, %xmm0, %eax
-; AVX-NEXT:   vpextrw $2, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $3, %xmm0, %eax
-; AVX-NEXT:   vpextrw $3, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $4, %xmm0, %eax
-; AVX-NEXT:   vpextrw $4, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $5, %xmm0, %eax
-; AVX-NEXT:   vpextrw $5, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $6, %xmm0, %eax
-; AVX-NEXT:   vpextrw $6, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $7, %xmm0, %eax
-; AVX-NEXT:   vpextrw $7, %xmm1, %ecx
-; AVX-NEXT:   shll %cl, %eax
-; AVX-NEXT:   vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX:        vpsllw    $12, %xmm1, %xmm2
+; AVX-NEXT:   vpsllw    $4, %xmm1, %xmm1
+; AVX-NEXT:   vpor      %xmm2, %xmm1, %xmm1
+; AVX-NEXT:   vpaddw    %xmm1, %xmm1, %xmm2
+; AVX-NEXT:   vpsllw    $8, %xmm0, %xmm3
+; AVX-NEXT:   vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsllw    $4, %xmm0, %xmm1
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsllw    $2, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsllw    $1, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %shl = shl <8 x i16> %r, %a
   %tmp2 = bitcast <8 x i16> %shl to <2 x i64>
@@ -124,88 +93,66 @@ entry:
 
 define <2 x i64> @shl_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 entry:
-; SSE2:       psllw $5, %xmm1
-; SSE2-NEXT:  pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT:  movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE2-NEXT:  movdqa %xmm2, %xmm3
-; SSE2-NEXT:  pand %xmm1, %xmm3
-; SSE2-NEXT:  pcmpeqb %xmm2, %xmm3
-; SSE2-NEXT:  movdqa %xmm3, %xmm4
-; SSE2-NEXT:  pandn %xmm0, %xmm4
-; SSE2-NEXT:  psllw $4, %xmm0
-; SSE2-NEXT:  pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT:  pand %xmm3, %xmm0
-; SSE2-NEXT:  por %xmm4, %xmm0
-; SSE2-NEXT:  paddb %xmm1, %xmm1
-; SSE2-NEXT:  movdqa %xmm2, %xmm3
-; SSE2-NEXT:  pand %xmm1, %xmm3
-; SSE2-NEXT:  pcmpeqb %xmm2, %xmm3
-; SSE2-NEXT:  movdqa %xmm3, %xmm4
-; SSE2-NEXT:  pandn %xmm0, %xmm4
-; SSE2-NEXT:  psllw $2, %xmm0
-; SSE2-NEXT:  pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT:  pand %xmm3, %xmm0
-; SSE2-NEXT:  por %xmm4, %xmm0
-; SSE2-NEXT:  paddb %xmm1, %xmm1
-; SSE2-NEXT:  pand %xmm2, %xmm1
-; SSE2-NEXT:  pcmpeqb %xmm2, %xmm1
-; SSE2-NEXT:  movdqa %xmm1, %xmm2
-; SSE2-NEXT:  pandn %xmm0, %xmm2
-; SSE2-NEXT:  paddb %xmm0, %xmm0
-; SSE2-NEXT:  pand %xmm1, %xmm0
-; SSE2-NEXT:  por %xmm2, %xmm0
+; SSE2:       psllw   $5, %xmm1
+; SSE2-NEXT:  pxor    %xmm2, %xmm2
+; SSE2-NEXT:  pxor    %xmm3, %xmm3
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:  movdqa  %xmm3, %xmm4
+; SSE2-NEXT:  pandn   %xmm0, %xmm4
+; SSE2-NEXT:  psllw   $4, %xmm0
+; SSE2-NEXT:  pand    {{.*}}(%rip), %xmm0
+; SSE2-NEXT:  pand    %xmm3, %xmm0
+; SSE2-NEXT:  por     %xmm4, %xmm0
+; SSE2-NEXT:  paddb   %xmm1, %xmm1
+; SSE2-NEXT:  pxor    %xmm3, %xmm3
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:  movdqa  %xmm3, %xmm4
+; SSE2-NEXT:  pandn   %xmm0, %xmm4
+; SSE2-NEXT:  psllw   $2, %xmm0
+; SSE2-NEXT:  pand    {{.*}}(%rip), %xmm0
+; SSE2-NEXT:  pand    %xmm3, %xmm0
+; SSE2-NEXT:  por     %xmm4, %xmm0
+; SSE2-NEXT:  paddb   %xmm1, %xmm1
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm1
+; SSE2-NEXT:  pandn   %xmm0, %xmm1
+; SSE2-NEXT:  paddb   %xmm0, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm1, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm5
-; SSE41-NEXT: paddb %xmm5, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm5, %xmm4
-; SSE41-NEXT: pcmpeqb %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm3, %xmm1
-; SSE41-NEXT: pcmpeqb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm6
-; SSE41-NEXT: psllw $4, %xmm6
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm6
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm6, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psllw $2, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: paddb %xmm5, %xmm5
-; SSE41-NEXT: pand %xmm3, %xmm5
-; SSE41-NEXT: pcmpeqb %xmm5, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa   %xmm0, %xmm2
+; SSE41-NEXT: psllw    $5, %xmm1
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: psllw    $4, %xmm3
+; SSE41-NEXT: pand     {{.*}}(%rip), %xmm3
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: psllw    $2, %xmm3
+; SSE41-NEXT: pand     {{.*}}(%rip), %xmm3
+; SSE41-NEXT: paddb    %xmm1, %xmm1
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: paddb    %xmm3, %xmm3
+; SSE41-NEXT: paddb    %xmm1, %xmm1
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT:   vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:   vpaddb %xmm1, %xmm1, %xmm2
-; AVX-NEXT:   vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX-NEXT:   vpand %xmm2, %xmm3, %xmm4
-; AVX-NEXT:   vpcmpeqb %xmm3, %xmm4, %xmm4
-; AVX-NEXT:   vpand %xmm1, %xmm3, %xmm1
-; AVX-NEXT:   vpcmpeqb %xmm3, %xmm1, %xmm1
-; AVX-NEXT:   vpsllw $4, %xmm0, %xmm5
-; AVX-NEXT:   vpand {{.*}}(%rip), %xmm5, %xmm5
-; AVX-NEXT:   vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
-; AVX-NEXT:   vpsllw $2, %xmm0, %xmm1
-; AVX-NEXT:   vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:   vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
-; AVX-NEXT:   vpaddb %xmm0, %xmm0, %xmm1
-; AVX-NEXT:   vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT:   vpand %xmm2, %xmm3, %xmm2
-; AVX-NEXT:   vpcmpeqb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX:        vpsllw    $5, %xmm1, %xmm1
+; AVX-NEXT:   vpsllw    $4, %xmm0, %xmm2
+; AVX-NEXT:   vpand     {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:   vpsllw    $2, %xmm0, %xmm2
+; AVX-NEXT:   vpand     {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:   vpaddb    %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:   vpaddb    %xmm0, %xmm0, %xmm2
+; AVX-NEXT:   vpaddb    %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %shl = shl <16 x i8> %r, %a
   %tmp2 = bitcast <16 x i8> %shl to <2 x i64>
@@ -214,114 +161,83 @@ entry:
 
 define <2 x i64> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
 entry:
-; SSE2:       pextrw $7, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $7, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  pextrw $3, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $3, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT:  pextrw $5, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $5, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  pextrw $1, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $1, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT:  pextrw $6, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $6, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  pextrw $2, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $2, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:  pextrw $4, %xmm1, %ecx
-; SSE2-NEXT:  pextrw $4, %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  movd %xmm1, %ecx
-; SSE2-NEXT:  movd %xmm0, %eax
-; SSE2-NEXT:  sarw %cl, %ax
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; ALL-NOT: sarw
+;
+; SSE2:       psllw   $12, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psraw   $8, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psraw   $4, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psraw   $2, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  psraw   $15, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  pandn   %xmm0, %xmm2
+; SSE2-NEXT:  psraw   $1, %xmm0
+; SSE2-NEXT:  pand    %xmm1, %xmm0
+; SSE2-NEXT:  por     %xmm2, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      pextrw $1, %xmm1, %ecx
-; SSE41-NEXT: pextrw $1, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: movd %xmm1, %ecx
-; SSE41-NEXT: movd %xmm0, %edx
-; SSE41-NEXT: sarw %cl, %dx
-; SSE41-NEXT: movd %edx, %xmm2
-; SSE41-NEXT: pinsrw $1, %eax, %xmm2
-; SSE41-NEXT: pextrw $2, %xmm1, %ecx
-; SSE41-NEXT: pextrw $2, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $2, %eax, %xmm2
-; SSE41-NEXT: pextrw $3, %xmm1, %ecx
-; SSE41-NEXT: pextrw $3, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $3, %eax, %xmm2
-; SSE41-NEXT: pextrw $4, %xmm1, %ecx
-; SSE41-NEXT: pextrw $4, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $4, %eax, %xmm2
-; SSE41-NEXT: pextrw $5, %xmm1, %ecx
-; SSE41-NEXT: pextrw $5, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $5, %eax, %xmm2
-; SSE41-NEXT: pextrw $6, %xmm1, %ecx
-; SSE41-NEXT: pextrw $6, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $6, %eax, %xmm2
-; SSE41-NEXT: pextrw $7, %xmm1, %ecx
-; SSE41-NEXT: pextrw $7, %xmm0, %eax
-; SSE41-NEXT: sarw %cl, %ax
-; SSE41-NEXT: pinsrw $7, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa    %xmm0, %xmm2
+; SSE41-NEXT: movdqa    %xmm1, %xmm0
+; SSE41-NEXT: psllw     $12, %xmm0
+; SSE41-NEXT: psllw     $4, %xmm1
+; SSE41-NEXT: por       %xmm0, %xmm1
+; SSE41-NEXT: movdqa    %xmm1, %xmm3
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm2, %xmm4
+; SSE41-NEXT: psraw     $8, %xmm4
+; SSE41-NEXT: movdqa    %xmm1, %xmm0
+; SSE41-NEXT: pblendvb  %xmm4, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psraw     $4, %xmm1
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psraw     $2, %xmm1
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psraw     $1, %xmm1
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpextrw $1, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $1, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vmovd %xmm1, %ecx
-; AVX-NEXT:   vmovd %xmm0, %edx
-; AVX-NEXT:   sarw %cl, %dx
-; AVX-NEXT:   vmovd %edx, %xmm2
-; AVX-NEXT:   vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $2, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $2, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $3, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $3, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $4, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $4, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $5, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $5, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $6, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $6, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $7, %xmm1, %ecx
-; AVX-NEXT:   vpextrw $7, %xmm0, %eax
-; AVX-NEXT:   sarw %cl, %ax
-; AVX-NEXT:   vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX:        vpsllw    $12, %xmm1, %xmm2
+; AVX-NEXT:   vpsllw    $4, %xmm1, %xmm1
+; AVX-NEXT:   vpor      %xmm2, %xmm1, %xmm1
+; AVX-NEXT:   vpaddw    %xmm1, %xmm1, %xmm2
+; AVX-NEXT:   vpsraw    $8, %xmm0, %xmm3
+; AVX-NEXT:   vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsraw    $4, %xmm0, %xmm1
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsraw    $2, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsraw    $1, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %ashr = ashr <8 x i16> %r, %a
   %tmp2 = bitcast <8 x i16> %ashr to <2 x i64>
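
The psllw $12 + psraw $15 selection in the SSE2 block and the pblendvb selection in the SSE41/AVX blocks above implement the same per-word computation; a hedged scalar sketch of it (hypothetical helper, assumes the shift amount is below 16 since larger amounts are undefined for the IR ashr):

  // Scalar model of the per-word variable arithmetic shift checked above: the
  // amount is moved up into the sign-bit position and each fixed shift
  // (8, 4, 2, 1) is applied only when the current top bit is set.
  #include <cstdint>

  int16_t ashr_word_model(int16_t r, uint16_t amt) {
    uint16_t ctl = uint16_t(amt << 12);   // psllw $12: amount bits 3..0 -> bits 15..12
    const int steps[4] = {8, 4, 2, 1};
    for (int step : steps) {
      if (ctl & 0x8000)                   // psraw $15 mask / pblendvb sign bit
        r = int16_t(r >> step);           // conditional psraw $step (arithmetic shift)
      ctl = uint16_t(ctl << 1);           // paddw %xmm1, %xmm1: next amount bit
    }
    return r;
  }
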
@@ -330,282 +246,122 @@ entry:
 
 define <2 x i64> @ashr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 entry:
+; ALL-NOT: sarb
 ;
-; SSE2:       pushq %rbp
-; SSE2-NEXT:  pushq %r15
-; SSE2-NEXT:  pushq %r14
-; SSE2-NEXT:  pushq %r13
-; SSE2-NEXT:  pushq %r12
-; SSE2-NEXT:  pushq %rbx
-; SSE2-NEXT:  movaps %xmm1, -24(%rsp)
-; SSE2-NEXT:  movaps %xmm0, -40(%rsp)
-; SSE2-NEXT:  movb -9(%rsp), %cl
-; SSE2-NEXT:  movb -25(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  movb -17(%rsp), %cl
-; SSE2-NEXT:  movb -33(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -13(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movl %eax, -44(%rsp)
-; SSE2-NEXT:  movb -29(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movzbl %al, %r9d
-; SSE2-NEXT:  movb -21(%rsp), %cl
-; SSE2-NEXT:  movb -37(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -11(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r10d
-; SSE2-NEXT:  movb -27(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -19(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r11d
-; SSE2-NEXT:  movb -35(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -15(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r14d
-; SSE2-NEXT:  movb -31(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movzbl %al, %r15d
-; SSE2-NEXT:  movb -23(%rsp), %cl
-; SSE2-NEXT:  movb -39(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -10(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r12d
-; SSE2-NEXT:  movb -26(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -18(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r13d
-; SSE2-NEXT:  movb -34(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -14(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r8d
-; SSE2-NEXT:  movb -30(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -22(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %ebp
-; SSE2-NEXT:  movb -38(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movb -12(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %edi
-; SSE2-NEXT:  movb -28(%rsp), %dl
-; SSE2-NEXT:  sarb %cl, %dl
-; SSE2-NEXT:  movb -20(%rsp), %cl
-; SSE2-NEXT:  movzbl %dl, %esi
-; SSE2-NEXT:  movb -36(%rsp), %bl
-; SSE2-NEXT:  sarb %cl, %bl
-; SSE2-NEXT:  movb -16(%rsp), %cl
-; SSE2-NEXT:  movzbl %bl, %ebx
-; SSE2-NEXT:  movb -32(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movzbl %al, %edx
-; SSE2-NEXT:  movb -24(%rsp), %cl
-; SSE2-NEXT:  movb -40(%rsp), %al
-; SSE2-NEXT:  sarb %cl, %al
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movd -44(%rsp), %xmm1
-; SSE2:       movd %r9d, %xmm2
-; SSE2-NEXT:  movd %r10d, %xmm3
-; SSE2-NEXT:  movd %r11d, %xmm4
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %r14d, %xmm0
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:  movd %r15d, %xmm1
-; SSE2-NEXT:  movd %r12d, %xmm2
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT:  movd %r13d, %xmm0
-; SSE2-NEXT:  movd %r8d, %xmm1
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %ebp, %xmm0
-; SSE2-NEXT:  movd %edi, %xmm3
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT:  movd %esi, %xmm0
-; SSE2-NEXT:  movd %ebx, %xmm1
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %edx, %xmm4
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT:  popq %rbx
-; SSE2-NEXT:  popq %r12
-; SSE2-NEXT:  popq %r13
-; SSE2-NEXT:  popq %r14
-; SSE2-NEXT:  popq %r15
-; SSE2-NEXT:  popq %rbp
+; SSE2:       punpckhbw {{.*#}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT:  psllw     $5, %xmm1
+; SSE2-NEXT:  punpckhbw {{.*#}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
+; SSE2-NEXT:  pxor      %xmm3, %xmm3
+; SSE2-NEXT:  pxor      %xmm5, %xmm5
+; SSE2-NEXT:  pcmpgtw   %xmm4, %xmm5
+; SSE2-NEXT:  movdqa    %xmm5, %xmm6
+; SSE2-NEXT:  pandn     %xmm2, %xmm6
+; SSE2-NEXT:  psraw     $4, %xmm2
+; SSE2-NEXT:  pand      %xmm5, %xmm2
+; SSE2-NEXT:  por       %xmm6, %xmm2
+; SSE2-NEXT:  paddw     %xmm4, %xmm4
+; SSE2-NEXT:  pxor      %xmm5, %xmm5
+; SSE2-NEXT:  pcmpgtw   %xmm4, %xmm5
+; SSE2-NEXT:  movdqa    %xmm5, %xmm6
+; SSE2-NEXT:  pandn     %xmm2, %xmm6
+; SSE2-NEXT:  psraw     $2, %xmm2
+; SSE2-NEXT:  pand      %xmm5, %xmm2
+; SSE2-NEXT:  por       %xmm6, %xmm2
+; SSE2-NEXT:  paddw     %xmm4, %xmm4
+; SSE2-NEXT:  pxor      %xmm5, %xmm5
+; SSE2-NEXT:  pcmpgtw   %xmm4, %xmm5
+; SSE2-NEXT:  movdqa    %xmm5, %xmm4
+; SSE2-NEXT:  pandn     %xmm2, %xmm4
+; SSE2-NEXT:  psraw     $1, %xmm2
+; SSE2-NEXT:  pand      %xmm5, %xmm2
+; SSE2-NEXT:  por       %xmm4, %xmm2
+; SSE2-NEXT:  psrlw     $8, %xmm2
+; SSE2-NEXT:  punpcklbw {{.*#}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:  punpcklbw {{.*#}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:  pxor      %xmm4, %xmm4
+; SSE2-NEXT:  pcmpgtw   %xmm1, %xmm4
+; SSE2-NEXT:  movdqa    %xmm4, %xmm5
+; SSE2-NEXT:  pandn     %xmm0, %xmm5
+; SSE2-NEXT:  psraw     $4, %xmm0
+; SSE2-NEXT:  pand      %xmm4, %xmm0
+; SSE2-NEXT:  por       %xmm5, %xmm0
+; SSE2-NEXT:  paddw     %xmm1, %xmm1
+; SSE2-NEXT:  pxor      %xmm4, %xmm4
+; SSE2-NEXT:  pcmpgtw   %xmm1, %xmm4
+; SSE2-NEXT:  movdqa    %xmm4, %xmm5
+; SSE2-NEXT:  pandn     %xmm0, %xmm5
+; SSE2-NEXT:  psraw     $2, %xmm0
+; SSE2-NEXT:  pand      %xmm4, %xmm0
+; SSE2-NEXT:  por       %xmm5, %xmm0
+; SSE2-NEXT:  paddw     %xmm1, %xmm1
+; SSE2-NEXT:  pcmpgtw   %xmm1, %xmm3
+; SSE2-NEXT:  movdqa    %xmm3, %xmm1
+; SSE2-NEXT:  pandn     %xmm0, %xmm1
+; SSE2-NEXT:  psraw     $1, %xmm0
+; SSE2-NEXT:  pand      %xmm3, %xmm0
+; SSE2-NEXT:  por       %xmm1, %xmm0
+; SSE2-NEXT:  psrlw     $8, %xmm0
+; SSE2-NEXT:  packuswb  %xmm2, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      pextrb $1, %xmm1, %ecx
-; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pextrb $0, %xmm1, %ecx
-; SSE41-NEXT: pextrb $0, %xmm0, %edx
-; SSE41-NEXT: sarb %cl, %dl
-; SSE41-NEXT: movzbl %dl, %ecx
-; SSE41-NEXT: movd %ecx, %xmm2
-; SSE41-NEXT: pinsrb $1, %eax, %xmm2
-; SSE41-NEXT: pextrb $2, %xmm1, %ecx
-; SSE41-NEXT: pextrb $2, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $2, %eax, %xmm2
-; SSE41-NEXT: pextrb $3, %xmm1, %ecx
-; SSE41-NEXT: pextrb $3, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $3, %eax, %xmm2
-; SSE41-NEXT: pextrb $4, %xmm1, %ecx
-; SSE41-NEXT: pextrb $4, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $4, %eax, %xmm2
-; SSE41-NEXT: pextrb $5, %xmm1, %ecx
-; SSE41-NEXT: pextrb $5, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $5, %eax, %xmm2
-; SSE41-NEXT: pextrb $6, %xmm1, %ecx
-; SSE41-NEXT: pextrb $6, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $6, %eax, %xmm2
-; SSE41-NEXT: pextrb $7, %xmm1, %ecx
-; SSE41-NEXT: pextrb $7, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $7, %eax, %xmm2
-; SSE41-NEXT: pextrb $8, %xmm1, %ecx
-; SSE41-NEXT: pextrb $8, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $8, %eax, %xmm2
-; SSE41-NEXT: pextrb $9, %xmm1, %ecx
-; SSE41-NEXT: pextrb $9, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $9, %eax, %xmm2
-; SSE41-NEXT: pextrb $10, %xmm1, %ecx
-; SSE41-NEXT: pextrb $10, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $10, %eax, %xmm2
-; SSE41-NEXT: pextrb $11, %xmm1, %ecx
-; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $11, %eax, %xmm2
-; SSE41-NEXT: pextrb $12, %xmm1, %ecx
-; SSE41-NEXT: pextrb $12, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $12, %eax, %xmm2
-; SSE41-NEXT: pextrb $13, %xmm1, %ecx
-; SSE41-NEXT: pextrb $13, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $13, %eax, %xmm2
-; SSE41-NEXT: pextrb $14, %xmm1, %ecx
-; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $14, %eax, %xmm2
-; SSE41-NEXT: pextrb $15, %xmm1, %ecx
-; SSE41-NEXT: pextrb $15, %xmm0, %eax
-; SSE41-NEXT: sarb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $15, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa    %xmm0, %xmm2
+; SSE41-NEXT: psllw     $5, %xmm1
+; SSE41-NEXT: punpckhbw {{.*#}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE41-NEXT: punpckhbw {{.*#}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE41-NEXT: movdqa    %xmm3, %xmm4
+; SSE41-NEXT: psraw     $4, %xmm4
+; SSE41-NEXT: pblendvb  %xmm4, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm4
+; SSE41-NEXT: psraw     $2, %xmm4
+; SSE41-NEXT: paddw     %xmm0, %xmm0
+; SSE41-NEXT: pblendvb  %xmm4, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm4
+; SSE41-NEXT: psraw     $1, %xmm4
+; SSE41-NEXT: paddw     %xmm0, %xmm0
+; SSE41-NEXT: pblendvb  %xmm4, %xmm3
+; SSE41-NEXT: psrlw     $8, %xmm3
+; SSE41-NEXT: punpcklbw {{.*#}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE41-NEXT: punpcklbw {{.*#}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: movdqa    %xmm1, %xmm2
+; SSE41-NEXT: psraw     $4, %xmm2
+; SSE41-NEXT: pblendvb  %xmm2, %xmm1
+; SSE41-NEXT: movdqa    %xmm1, %xmm2
+; SSE41-NEXT: psraw     $2, %xmm2
+; SSE41-NEXT: paddw     %xmm0, %xmm0
+; SSE41-NEXT: pblendvb  %xmm2, %xmm1
+; SSE41-NEXT: movdqa    %xmm1, %xmm2
+; SSE41-NEXT: psraw     $1, %xmm2
+; SSE41-NEXT: paddw     %xmm0, %xmm0
+; SSE41-NEXT: pblendvb  %xmm2, %xmm1
+; SSE41-NEXT: psrlw     $8, %xmm1
+; SSE41-NEXT: packuswb  %xmm3, %xmm1
+; SSE41-NEXT: movdqa    %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpextrb $1, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $1, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpextrb $0, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $0, %xmm0, %edx
-; AVX-NEXT:   sarb %cl, %dl
-; AVX-NEXT:   movzbl %dl, %ecx
-; AVX-NEXT:   vmovd %ecx, %xmm2
-; AVX-NEXT:   vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $2, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $2, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $3, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $3, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $4, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $4, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $5, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $5, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $6, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $6, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $7, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $7, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $8, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $8, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $9, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $9, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $10, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $10, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $11, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $11, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $12, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $12, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $13, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $13, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $14, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $14, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $15, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $15, %xmm0, %eax
-; AVX-NEXT:   sarb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX:        vpsllw     $5, %xmm1, %xmm1
+; AVX-NEXT:   vpunpckhbw {{.*#}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX-NEXT:   vpunpckhbw {{.*#}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX-NEXT:   vpsraw     $4, %xmm3, %xmm4
+; AVX-NEXT:   vpblendvb  %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT:   vpsraw     $2, %xmm3, %xmm4
+; AVX-NEXT:   vpaddw     %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb  %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT:   vpsraw     $1, %xmm3, %xmm4
+; AVX-NEXT:   vpaddw     %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb  %xmm2, %xmm4, %xmm3, %xmm2
+; AVX-NEXT:   vpsrlw     $8, %xmm2, %xmm2
+; AVX-NEXT:   vpunpcklbw {{.*#}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT:   vpunpcklbw {{.*#}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX-NEXT:   vpsraw     $4, %xmm0, %xmm3
+; AVX-NEXT:   vpblendvb  %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsraw     $2, %xmm0, %xmm3
+; AVX-NEXT:   vpaddw     %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb  %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsraw     $1, %xmm0, %xmm3
+; AVX-NEXT:   vpaddw     %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb  %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw     $8, %xmm0, %xmm0
+; AVX-NEXT:   vpackuswb  %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %ashr = ashr <16 x i8> %r, %a
   %tmp2 = bitcast <16 x i8> %ashr to <2 x i64>
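
The ashr_16i8 checks above first widen each byte into the high half of a 16-bit lane so that psraw sign-extends from the byte's own sign bit; a hedged per-byte sketch (hypothetical helper, amounts of 8 or more omitted as undefined for the IR ashr):

  // Scalar model of the per-byte arithmetic shift checked above: place the
  // byte in bits 15..8 (punpckhbw/punpcklbw), run the stepped psraw selection
  // on the word, then move the result back down (psrlw $8) and repack
  // (packuswb).
  #include <cstdint>

  int8_t ashr_byte_model(int8_t r, uint8_t amt) {
    int16_t wide = int16_t(uint16_t(uint8_t(r)) << 8);  // byte -> bits 15..8
    uint16_t ctl = uint16_t(uint8_t(amt << 5)) << 8;    // psllw $5, then unpacked high
    const int steps[3] = {4, 2, 1};
    for (int step : steps) {
      if (ctl & 0x8000)                   // pcmpgtw-against-zero / pblendvb sign bit
        wide = int16_t(wide >> step);     // conditional psraw $step
      ctl = uint16_t(ctl << 1);           // paddw: next amount bit
    }
    return int8_t(uint16_t(wide) >> 8);   // psrlw $8; packuswb merges both halves
  }
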
@@ -614,118 +370,83 @@ entry:
 
 define <2 x i64> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
 entry:
-
-; SSE2:       pextrw $7, %xmm0, %eax
-; SSE2-NEXT:  pextrw $7, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  pextrw $3, %xmm0, %eax
-; SSE2-NEXT:  pextrw $3, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT:  pextrw $5, %xmm0, %eax
-; SSE2-NEXT:  pextrw $5, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  pextrw $1, %xmm0, %eax
-; SSE2-NEXT:  pextrw $1, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm2
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT:  pextrw $6, %xmm0, %eax
-; SSE2-NEXT:  pextrw $6, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  pextrw $2, %xmm0, %eax
-; SSE2-NEXT:  pextrw $2, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm4
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:  pextrw $4, %xmm0, %eax
-; SSE2-NEXT:  pextrw $4, %xmm1, %ecx
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm3
-; SSE2-NEXT:  movd %xmm1, %ecx
-; SSE2-NEXT:  movd %xmm0, %eax
-; SSE2-NEXT:  movzwl %ax, %eax
-; SSE2-NEXT:  shrl %cl, %eax
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT:  punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; ALL-NOT: shrl
+;
+; SSE2:       psllw   $12, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psrlw   $8, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psrlw   $4, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  psraw   $15, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm3
+; SSE2-NEXT:  pandn   %xmm0, %xmm3
+; SSE2-NEXT:  psrlw   $2, %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm3, %xmm0
+; SSE2-NEXT:  paddw   %xmm1, %xmm1
+; SSE2-NEXT:  psraw   $15, %xmm1
+; SSE2-NEXT:  movdqa  %xmm1, %xmm2
+; SSE2-NEXT:  pandn   %xmm0, %xmm2
+; SSE2-NEXT:  psrlw   $1, %xmm0
+; SSE2-NEXT:  pand    %xmm1, %xmm0
+; SSE2-NEXT:  por     %xmm2, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      pextrw $1, %xmm0, %eax
-; SSE41-NEXT: pextrw $1, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: movd %xmm1, %ecx
-; SSE41-NEXT: movd %xmm0, %edx
-; SSE41-NEXT: movzwl %dx, %edx
-; SSE41-NEXT: shrl %cl, %edx
-; SSE41-NEXT: movd %edx, %xmm2
-; SSE41-NEXT: pinsrw $1, %eax, %xmm2
-; SSE41-NEXT: pextrw $2, %xmm0, %eax
-; SSE41-NEXT: pextrw $2, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $2, %eax, %xmm2
-; SSE41-NEXT: pextrw $3, %xmm0, %eax
-; SSE41-NEXT: pextrw $3, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $3, %eax, %xmm2
-; SSE41-NEXT: pextrw $4, %xmm0, %eax
-; SSE41-NEXT: pextrw $4, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $4, %eax, %xmm2
-; SSE41-NEXT: pextrw $5, %xmm0, %eax
-; SSE41-NEXT: pextrw $5, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $5, %eax, %xmm2
-; SSE41-NEXT: pextrw $6, %xmm0, %eax
-; SSE41-NEXT: pextrw $6, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $6, %eax, %xmm2
-; SSE41-NEXT: pextrw $7, %xmm0, %eax
-; SSE41-NEXT: pextrw $7, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrw $7, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa    %xmm0, %xmm2
+; SSE41-NEXT: movdqa    %xmm1, %xmm0
+; SSE41-NEXT: psllw     $12, %xmm0
+; SSE41-NEXT: psllw     $4, %xmm1
+; SSE41-NEXT: por       %xmm0, %xmm1
+; SSE41-NEXT: movdqa    %xmm1, %xmm3
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm2, %xmm4
+; SSE41-NEXT: psrlw     $8, %xmm4
+; SSE41-NEXT: movdqa    %xmm1, %xmm0
+; SSE41-NEXT: pblendvb  %xmm4, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psrlw     $4, %xmm1
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psrlw     $2, %xmm1
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm1
+; SSE41-NEXT: psrlw     $1, %xmm1
+; SSE41-NEXT: paddw     %xmm3, %xmm3
+; SSE41-NEXT: movdqa    %xmm3, %xmm0
+; SSE41-NEXT: pblendvb  %xmm1, %xmm2
+; SSE41-NEXT: movdqa    %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpextrw $1, %xmm0, %eax
-; AVX-NEXT:   vpextrw $1, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vmovd %xmm1, %ecx
-; AVX-NEXT:   vmovd %xmm0, %edx
-; AVX-NEXT:   movzwl %dx, %edx
-; AVX-NEXT:   shrl %cl, %edx
-; AVX-NEXT:   vmovd %edx, %xmm2
-; AVX-NEXT:   vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $2, %xmm0, %eax
-; AVX-NEXT:   vpextrw $2, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $3, %xmm0, %eax
-; AVX-NEXT:   vpextrw $3, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $4, %xmm0, %eax
-; AVX-NEXT:   vpextrw $4, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $5, %xmm0, %eax
-; AVX-NEXT:   vpextrw $5, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $6, %xmm0, %eax
-; AVX-NEXT:   vpextrw $6, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrw $7, %xmm0, %eax
-; AVX-NEXT:   vpextrw $7, %xmm1, %ecx
-; AVX-NEXT:   shrl %cl, %eax
-; AVX-NEXT:   vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX:        vpsllw    $12, %xmm1, %xmm2
+; AVX-NEXT:   vpsllw    $4, %xmm1, %xmm1
+; AVX-NEXT:   vpor      %xmm2, %xmm1, %xmm1
+; AVX-NEXT:   vpaddw    %xmm1, %xmm1, %xmm2
+; AVX-NEXT:   vpsrlw    $8, %xmm0, %xmm3
+; AVX-NEXT:   vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw    $4, %xmm0, %xmm1
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw    $2, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw    $1, %xmm0, %xmm1
+; AVX-NEXT:   vpaddw    %xmm2, %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %lshr = lshr <8 x i16> %r, %a
   %tmp2 = bitcast <8 x i16> %lshr to <2 x i64>
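
The lshr_8i16 selection mirrors the ashr_8i16 sketch above with psrlw in place of psraw. One detail worth calling out from the SSE41 block is that the control value is built with psllw $12, psllw $4 and por rather than a single shift; a hedged sketch of the value that produces (hypothetical helper):

  // Sketch of the SSE41 control word built above: (amt << 12) | (amt << 4)
  // places the amount's current top bit in the sign bit of *both* bytes of the
  // word, because pblendvb blends per byte and both halves of a word must be
  // selected the same way. The paddw doublings keep the two copies in step.
  #include <cstdint>

  uint16_t lshr_word_control(uint16_t amt) {
    return uint16_t((amt << 12) | (amt << 4));   // psllw $12 ; psllw $4 ; por
  }
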
@@ -734,281 +455,71 @@ entry:
 
 define <2 x i64> @lshr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 entry:
-; SSE2:       pushq %rbp
-; SSE2-NEXT:  pushq %r15
-; SSE2-NEXT:  pushq %r14
-; SSE2-NEXT:  pushq %r13
-; SSE2-NEXT:  pushq %r12
-; SSE2-NEXT:  pushq %rbx
-; SSE2-NEXT:  movaps %xmm1, -24(%rsp)
-; SSE2-NEXT:  movaps %xmm0, -40(%rsp)
-; SSE2-NEXT:  movb -9(%rsp), %cl
-; SSE2-NEXT:  movb -25(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  movb -17(%rsp), %cl
-; SSE2-NEXT:  movb -33(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -13(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movl %eax, -44(%rsp)
-; SSE2-NEXT:  movb -29(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movzbl %al, %r9d
-; SSE2-NEXT:  movb -21(%rsp), %cl
-; SSE2-NEXT:  movb -37(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -11(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r10d
-; SSE2-NEXT:  movb -27(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -19(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r11d
-; SSE2-NEXT:  movb -35(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -15(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r14d
-; SSE2-NEXT:  movb -31(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movzbl %al, %r15d
-; SSE2-NEXT:  movb -23(%rsp), %cl
-; SSE2-NEXT:  movb -39(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -10(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r12d
-; SSE2-NEXT:  movb -26(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -18(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r13d
-; SSE2-NEXT:  movb -34(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -14(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %r8d
-; SSE2-NEXT:  movb -30(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -22(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %ebp
-; SSE2-NEXT:  movb -38(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movb -12(%rsp), %cl
-; SSE2-NEXT:  movzbl %al, %edi
-; SSE2-NEXT:  movb -28(%rsp), %dl
-; SSE2-NEXT:  shrb %cl, %dl
-; SSE2-NEXT:  movb -20(%rsp), %cl
-; SSE2-NEXT:  movzbl %dl, %esi
-; SSE2-NEXT:  movb -36(%rsp), %bl
-; SSE2-NEXT:  shrb %cl, %bl
-; SSE2-NEXT:  movb -16(%rsp), %cl
-; SSE2-NEXT:  movzbl %bl, %ebx
-; SSE2-NEXT:  movb -32(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movzbl %al, %edx
-; SSE2-NEXT:  movb -24(%rsp), %cl
-; SSE2-NEXT:  movb -40(%rsp), %al
-; SSE2-NEXT:  shrb %cl, %al
-; SSE2-NEXT:  movzbl %al, %eax
-; SSE2-NEXT:  movd -44(%rsp), %xmm1
-; SSE2:       movd %r9d, %xmm2
-; SSE2-NEXT:  movd %r10d, %xmm3
-; SSE2-NEXT:  movd %r11d, %xmm4
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %r14d, %xmm0
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:  movd %r15d, %xmm1
-; SSE2-NEXT:  movd %r12d, %xmm2
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT:  movd %r13d, %xmm0
-; SSE2-NEXT:  movd %r8d, %xmm1
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %ebp, %xmm0
-; SSE2-NEXT:  movd %edi, %xmm3
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT:  movd %esi, %xmm0
-; SSE2-NEXT:  movd %ebx, %xmm1
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:  movd %edx, %xmm4
-; SSE2-NEXT:  movd %eax, %xmm0
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT:  punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT:  popq %rbx
-; SSE2-NEXT:  popq %r12
-; SSE2-NEXT:  popq %r13
-; SSE2-NEXT:  popq %r14
-; SSE2-NEXT:  popq %r15
-; SSE2-NEXT:  popq %rbp
+; ALL-NOT: shrb
+;
+; SSE2:       psllw   $5, %xmm1
+; SSE2-NEXT:  pxor    %xmm2, %xmm2
+; SSE2-NEXT:  pxor    %xmm3, %xmm3
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:  movdqa  %xmm3, %xmm4
+; SSE2-NEXT:  pandn   %xmm0, %xmm4
+; SSE2-NEXT:  psrlw   $4, %xmm0
+; SSE2-NEXT:  pand    {{.*}}(%rip), %xmm0
+; SSE2-NEXT:  pand    %xmm3, %xmm0
+; SSE2-NEXT:  por     %xmm4, %xmm0
+; SSE2-NEXT:  paddb   %xmm1, %xmm1
+; SSE2-NEXT:  pxor    %xmm3, %xmm3
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:  movdqa  %xmm3, %xmm4
+; SSE2-NEXT:  pandn   %xmm0, %xmm4
+; SSE2-NEXT:  psrlw   $2, %xmm0
+; SSE2-NEXT:  pand    {{.*}}(%rip), %xmm0
+; SSE2-NEXT:  pand    %xmm3, %xmm0
+; SSE2-NEXT:  por     %xmm4, %xmm0
+; SSE2-NEXT:  paddb   %xmm1, %xmm1
+; SSE2-NEXT:  pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:  movdqa  %xmm2, %xmm1
+; SSE2-NEXT:  pandn   %xmm0, %xmm1
+; SSE2-NEXT:  psrlw   $1, %xmm0
+; SSE2-NEXT:  pand    {{.*}}(%rip), %xmm0
+; SSE2-NEXT:  pand    %xmm2, %xmm0
+; SSE2-NEXT:  por     %xmm1, %xmm0
 ; SSE2-NEXT:  retq
 ;
-; SSE41:      pextrb $1, %xmm1, %ecx
-; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pextrb $0, %xmm1, %ecx
-; SSE41-NEXT: pextrb $0, %xmm0, %edx
-; SSE41-NEXT: shrb %cl, %dl
-; SSE41-NEXT: movzbl %dl, %ecx
-; SSE41-NEXT: movd %ecx, %xmm2
-; SSE41-NEXT: pinsrb $1, %eax, %xmm2
-; SSE41-NEXT: pextrb $2, %xmm1, %ecx
-; SSE41-NEXT: pextrb $2, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $2, %eax, %xmm2
-; SSE41-NEXT: pextrb $3, %xmm1, %ecx
-; SSE41-NEXT: pextrb $3, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $3, %eax, %xmm2
-; SSE41-NEXT: pextrb $4, %xmm1, %ecx
-; SSE41-NEXT: pextrb $4, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $4, %eax, %xmm2
-; SSE41-NEXT: pextrb $5, %xmm1, %ecx
-; SSE41-NEXT: pextrb $5, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $5, %eax, %xmm2
-; SSE41-NEXT: pextrb $6, %xmm1, %ecx
-; SSE41-NEXT: pextrb $6, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $6, %eax, %xmm2
-; SSE41-NEXT: pextrb $7, %xmm1, %ecx
-; SSE41-NEXT: pextrb $7, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $7, %eax, %xmm2
-; SSE41-NEXT: pextrb $8, %xmm1, %ecx
-; SSE41-NEXT: pextrb $8, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $8, %eax, %xmm2
-; SSE41-NEXT: pextrb $9, %xmm1, %ecx
-; SSE41-NEXT: pextrb $9, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $9, %eax, %xmm2
-; SSE41-NEXT: pextrb $10, %xmm1, %ecx
-; SSE41-NEXT: pextrb $10, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $10, %eax, %xmm2
-; SSE41-NEXT: pextrb $11, %xmm1, %ecx
-; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $11, %eax, %xmm2
-; SSE41-NEXT: pextrb $12, %xmm1, %ecx
-; SSE41-NEXT: pextrb $12, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $12, %eax, %xmm2
-; SSE41-NEXT: pextrb $13, %xmm1, %ecx
-; SSE41-NEXT: pextrb $13, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $13, %eax, %xmm2
-; SSE41-NEXT: pextrb $14, %xmm1, %ecx
-; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $14, %eax, %xmm2
-; SSE41-NEXT: pextrb $15, %xmm1, %ecx
-; SSE41-NEXT: pextrb $15, %xmm0, %eax
-; SSE41-NEXT: shrb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $15, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41:      movdqa   %xmm0, %xmm2
+; SSE41-NEXT: psllw    $5, %xmm1
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: psrlw    $4, %xmm3
+; SSE41-NEXT: pand     {{.*}}(%rip), %xmm3
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: psrlw    $2, %xmm3
+; SSE41-NEXT: pand     {{.*}}(%rip), %xmm3
+; SSE41-NEXT: paddb    %xmm1, %xmm1
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm3
+; SSE41-NEXT: psrlw    $1, %xmm3
+; SSE41-NEXT: pand     {{.*}}(%rip), %xmm3
+; SSE41-NEXT: paddb    %xmm1, %xmm1
+; SSE41-NEXT: movdqa   %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: movdqa   %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX:        vpextrb $1, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $1, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpextrb $0, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $0, %xmm0, %edx
-; AVX-NEXT:   shrb %cl, %dl
-; AVX-NEXT:   movzbl %dl, %ecx
-; AVX-NEXT:   vmovd %ecx, %xmm2
-; AVX-NEXT:   vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $2, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $2, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $3, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $3, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $4, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $4, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $5, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $5, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $6, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $6, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $7, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $7, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $8, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $8, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $9, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $9, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $10, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $10, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $11, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $11, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $12, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $12, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $13, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $13, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $14, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $14, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-NEXT:   vpextrb $15, %xmm1, %ecx
-; AVX-NEXT:   vpextrb $15, %xmm0, %eax
-; AVX-NEXT:   shrb %cl, %al
-; AVX-NEXT:   movzbl %al, %eax
-; AVX-NEXT:   vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX:        vpsllw    $5, %xmm1, %xmm1
+; AVX-NEXT:   vpsrlw    $4, %xmm0, %xmm2
+; AVX-NEXT:   vpand     {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw    $2, %xmm0, %xmm2
+; AVX-NEXT:   vpand     {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:   vpaddb    %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:   vpsrlw    $1, %xmm0, %xmm2
+; AVX-NEXT:   vpand     {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:   vpaddb    %xmm1, %xmm1, %xmm1
+; AVX-NEXT:   vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:   retq
   %lshr = lshr <16 x i8> %r, %a
   %tmp2 = bitcast <16 x i8> %lshr to <2 x i64>
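
The lshr_16i8 checks above use the same stepped selection as the shl case, with a pand against a per-byte mask after each psrlw so that bits shifted in from the neighbouring byte of the word are dropped; a hedged per-byte sketch (hypothetical helper, amounts of 8 or more omitted as undefined for the IR lshr):

  // Scalar model of the per-byte logical shift checked above. In the vector
  // code each psrlw $4/$2/$1 is followed by a pand with a byte mask
  // (0x0f, 0x3f, 0x7f) loaded from memory; a scalar byte shift needs no mask.
  #include <cstdint>

  uint8_t lshr_byte_model(uint8_t r, uint8_t amt) {
    uint8_t ctl = uint8_t(amt << 5);      // psllw $5: amount bits 2..0 -> bits 7..5
    if (ctl & 0x80) r = uint8_t(r >> 4);  // pcmpgtb-vs-zero / pblendvb: shift-by-4 step
    ctl = uint8_t(ctl << 1);              // paddb: next amount bit
    if (ctl & 0x80) r = uint8_t(r >> 2);  // shift-by-2 step
    ctl = uint8_t(ctl << 1);
    if (ctl & 0x80) r = uint8_t(r >> 1);  // shift-by-1 step
    return r;
  }
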




