[llvm] r248091 - [X86][SSE] Vectorize CTTZ + CTTZ_ZERO_UNDEF

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 19 06:22:57 PDT 2015


Author: rksimon
Date: Sat Sep 19 08:22:57 2015
New Revision: 248091

URL: http://llvm.org/viewvc/llvm-project?rev=248091&view=rev
Log:
[X86][SSE] Vectorize CTTZ + CTTZ_ZERO_UNDEF

Now that we have fast vector CTPOP implementations, we can use them to speed up vector CTTZ using the pattern (cttz(x) = ctpop((x & -x) - 1))

Additionally, for AVX512CD, which provides vector lzcnt instructions, we can use the pattern (cttz_undef(x) = (width - 1) - ctlz(x & -x))

Differential Revision: http://reviews.llvm.org/D12663
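
For reference, a minimal scalar C++ sketch (not part of the patch) of the two identities used by this lowering, with GCC/Clang builtins standing in for the vector CTPOP/CTLZ nodes that the new code emits:

#include <cassert>
#include <cstdint>

// cttz(x) = ctpop((x & -x) - 1): isolate the lowest set bit, subtract one to
// form a mask of the trailing zeros, then population-count that mask.
// For x == 0, lsb - 1 is all-ones, so the result is the bit width (32).
static unsigned cttz_via_ctpop(uint32_t x) {
  uint32_t lsb = x & (0u - x);
  return __builtin_popcount(lsb - 1);
}

// cttz_undef(x) = (width - 1) - ctlz(x & -x): the isolated lowest set bit has
// exactly one bit set, so its leading-zero count determines its position.
// Undefined for x == 0, matching CTTZ_ZERO_UNDEF semantics.
static unsigned cttz_undef_via_ctlz(uint32_t x) {
  uint32_t lsb = x & (0u - x);
  return 31 - __builtin_clz(lsb);
}

int main() {
  for (uint32_t x = 1; x < 1024; ++x) {
    assert(cttz_via_ctpop(x) == (unsigned)__builtin_ctz(x));
    assert(cttz_undef_via_ctlz(x) == (unsigned)__builtin_ctz(x));
  }
}
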

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=248091&r1=248090&r2=248091&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Sep 19 08:22:57 2015
@@ -847,6 +847,15 @@ X86TargetLowering::X86TargetLowering(con
     setOperationAction(ISD::CTPOP,              MVT::v4i32, Custom);
     setOperationAction(ISD::CTPOP,              MVT::v2i64, Custom);
 
+    setOperationAction(ISD::CTTZ,               MVT::v16i8, Custom);
+    setOperationAction(ISD::CTTZ,               MVT::v8i16, Custom);
+    setOperationAction(ISD::CTTZ,               MVT::v4i32, Custom);
+    // ISD::CTTZ v2i64 - scalarization is faster.
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,    MVT::v16i8, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,    MVT::v8i16, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,    MVT::v4i32, Custom);
+    // ISD::CTTZ_ZERO_UNDEF v2i64 - scalarization is faster.
+
     // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
     for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
       MVT VT = (MVT::SimpleValueType)i;
@@ -1127,6 +1136,15 @@ X86TargetLowering::X86TargetLowering(con
     setOperationAction(ISD::CTPOP,             MVT::v8i32, Custom);
     setOperationAction(ISD::CTPOP,             MVT::v4i64, Custom);
 
+    setOperationAction(ISD::CTTZ,              MVT::v32i8, Custom);
+    setOperationAction(ISD::CTTZ,              MVT::v16i16, Custom);
+    setOperationAction(ISD::CTTZ,              MVT::v8i32, Custom);
+    setOperationAction(ISD::CTTZ,              MVT::v4i64, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v32i8, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v16i16, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v8i32, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v4i64, Custom);
+
     if (Subtarget->hasFMA() || Subtarget->hasFMA4() || Subtarget->hasAVX512()) {
       setOperationAction(ISD::FMA,             MVT::v8f32, Legal);
       setOperationAction(ISD::FMA,             MVT::v4f64, Legal);
@@ -1499,6 +1517,9 @@ X86TargetLowering::X86TargetLowering(con
       setOperationAction(ISD::CTLZ,             MVT::v16i32, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v8i64, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v16i32, Legal);
+
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v8i64, Custom);
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v16i32, Custom);
     }
     if (Subtarget->hasVLX() && Subtarget->hasCDI()) {
       setOperationAction(ISD::CTLZ,             MVT::v4i64, Legal);
@@ -1509,6 +1530,11 @@ X86TargetLowering::X86TargetLowering(con
       setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v8i32, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v2i64, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v4i32, Legal);
+
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v4i64, Custom);
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v8i32, Custom);
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v2i64, Custom);
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v4i32, Custom);
     }
     if (Subtarget->hasDQI()) {
       setOperationAction(ISD::MUL,             MVT::v2i64, Legal);
@@ -17222,13 +17248,39 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDVa
 
 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
-  unsigned NumBits = VT.getSizeInBits();
+  unsigned NumBits = VT.getScalarSizeInBits();
   SDLoc dl(Op);
-  Op = Op.getOperand(0);
+
+  if (VT.isVector()) {
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+    SDValue N0 = Op.getOperand(0);
+    SDValue Zero = DAG.getConstant(0, dl, VT);
+
+    // lsb(x) = (x & -x)
+    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0,
+                              DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
+
+    // cttz_undef(x) = (width - 1) - ctlz(lsb)
+    if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
+        TLI.isOperationLegal(ISD::CTLZ, VT)) {
+      SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
+      return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
+                         DAG.getNode(ISD::CTLZ, dl, VT, LSB));
+    }
+
+    // cttz(x) = ctpop(lsb - 1)
+    SDValue One = DAG.getConstant(1, dl, VT);
+    return DAG.getNode(ISD::CTPOP, dl, VT,
+                       DAG.getNode(ISD::SUB, dl, VT, LSB, One));
+  }
+
+  assert(Op.getOpcode() == ISD::CTTZ &&
+         "Only scalar CTTZ requires custom lowering");
 
   // Issue a bsf (scan bits forward) which also sets EFLAGS.
   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
-  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
+  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op.getOperand(0));
 
   // If src is zero (i.e. bsf sets ZF), returns NumBits.
   SDValue Ops[] = {
@@ -19168,7 +19220,8 @@ SDValue X86TargetLowering::LowerOperatio
   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
   case ISD::CTLZ:               return LowerCTLZ(Op, DAG);
   case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ_ZERO_UNDEF(Op, DAG);
-  case ISD::CTTZ:               return LowerCTTZ(Op, DAG);
+  case ISD::CTTZ:
+  case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, DAG);
   case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
   case ISD::UMUL_LOHI:
   case ISD::SMUL_LOHI:          return LowerMUL_LOHI(Op, Subtarget, DAG);

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll?rev=248091&r1=248090&r2=248091&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll Sat Sep 19 08:22:57 2015
@@ -153,130 +153,154 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
 ; SSE2-LABEL: testv4i32:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT:    movd %xmm1, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movl $32, %ecx
-; SSE2-NEXT:    cmovel %ecx, %eax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE2-NEXT:    movd %xmm2, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    cmovel %ecx, %eax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    cmovel %ecx, %eax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    cmovel %ecx, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    psubd %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrld $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubd %xmm0, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pand %xmm0, %xmm3
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    paddd %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrld $4, %xmm0
+; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT:    psadbw %xmm1, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    psadbw %xmm1, %xmm0
+; SSE2-NEXT:    packuswb %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv4i32:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm1, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movl $32, %ecx
-; SSE3-NEXT:    cmovel %ecx, %eax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE3-NEXT:    movd %xmm2, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    cmovel %ecx, %eax
-; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    cmovel %ecx, %eax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    cmovel %ecx, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    pxor %xmm2, %xmm2
+; SSE3-NEXT:    psubd %xmm0, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psrld $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubd %xmm0, %xmm2
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
+; SSE3-NEXT:    movdqa %xmm2, %xmm3
+; SSE3-NEXT:    pand %xmm0, %xmm3
+; SSE3-NEXT:    psrld $2, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    paddd %xmm3, %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psrld $4, %xmm0
+; SSE3-NEXT:    paddd %xmm2, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSE3-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT:    psadbw %xmm1, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    psadbw %xmm1, %xmm0
+; SSE3-NEXT:    packuswb %xmm2, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv4i32:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSSE3-NEXT:    movd %xmm1, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movl $32, %ecx
-; SSSE3-NEXT:    cmovel %ecx, %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSSE3-NEXT:    movd %xmm2, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    cmovel %ecx, %eax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    movd %xmm0, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    cmovel %ecx, %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT:    movd %xmm0, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    cmovel %ecx, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    psubd %xmm0, %xmm2
+; SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSSE3-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pand %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm5
+; SSSE3-NEXT:    pshufb %xmm4, %xmm5
+; SSSE3-NEXT:    psrlw $4, %xmm2
+; SSSE3-NEXT:    pand %xmm3, %xmm2
+; SSSE3-NEXT:    pshufb %xmm2, %xmm0
+; SSSE3-NEXT:    paddb %xmm5, %xmm0
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT:    psadbw %xmm1, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    psadbw %xmm1, %xmm0
+; SSSE3-NEXT:    packuswb %xmm2, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv4i32:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrd $1, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    movl $32, %ecx
-; SSE41-NEXT:    cmovel %ecx, %eax
-; SSE41-NEXT:    movd %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    movd %edx, %xmm1
-; SSE41-NEXT:    pinsrd $1, %eax, %xmm1
-; SSE41-NEXT:    pextrd $2, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    cmovel %ecx, %eax
-; SSE41-NEXT:    pinsrd $2, %eax, %xmm1
-; SSE41-NEXT:    pextrd $3, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    cmovel %ecx, %eax
-; SSE41-NEXT:    pinsrd $3, %eax, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    psubd %xmm0, %xmm2
+; SSE41-NEXT:    pand %xmm0, %xmm2
+; SSE41-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pand %xmm3, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pshufb %xmm4, %xmm5
+; SSE41-NEXT:    psrlw $4, %xmm2
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    pshufb %xmm2, %xmm0
+; SSE41-NEXT:    paddb %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE41-NEXT:    psadbw %xmm1, %xmm2
+; SSE41-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT:    psadbw %xmm1, %xmm0
+; SSE41-NEXT:    packuswb %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: testv4i32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    movl $32, %ecx
-; AVX-NEXT:    cmovel %ecx, %eax
-; AVX-NEXT:    vmovd %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vmovd %edx, %xmm1
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    cmovel %ecx, %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    cmovel %ecx, %eax
-; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: testv4i32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: testv4i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX2-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %in, i1 0)
   ret <4 x i32> %out
 }
@@ -284,105 +308,154 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
 ; SSE2-LABEL: testv4i32u:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT:    movd %xmm1, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE2-NEXT:    movd %xmm2, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    psubd %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrld $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubd %xmm0, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pand %xmm0, %xmm3
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    paddd %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrld $4, %xmm0
+; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT:    psadbw %xmm1, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    psadbw %xmm1, %xmm0
+; SSE2-NEXT:    packuswb %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv4i32u:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm1, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE3-NEXT:    movd %xmm2, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    pxor %xmm2, %xmm2
+; SSE3-NEXT:    psubd %xmm0, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psrld $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubd %xmm0, %xmm2
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
+; SSE3-NEXT:    movdqa %xmm2, %xmm3
+; SSE3-NEXT:    pand %xmm0, %xmm3
+; SSE3-NEXT:    psrld $2, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    paddd %xmm3, %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psrld $4, %xmm0
+; SSE3-NEXT:    paddd %xmm2, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSE3-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT:    psadbw %xmm1, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    psadbw %xmm1, %xmm0
+; SSE3-NEXT:    packuswb %xmm2, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv4i32u:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSSE3-NEXT:    movd %xmm1, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSSE3-NEXT:    movd %xmm2, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    movd %xmm0, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT:    movd %xmm0, %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    psubd %xmm0, %xmm2
+; SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSSE3-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pand %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm5
+; SSSE3-NEXT:    pshufb %xmm4, %xmm5
+; SSSE3-NEXT:    psrlw $4, %xmm2
+; SSSE3-NEXT:    pand %xmm3, %xmm2
+; SSSE3-NEXT:    pshufb %xmm2, %xmm0
+; SSSE3-NEXT:    paddb %xmm5, %xmm0
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT:    psadbw %xmm1, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    psadbw %xmm1, %xmm0
+; SSSE3-NEXT:    packuswb %xmm2, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv4i32u:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrd $1, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    movd %xmm0, %ecx
-; SSE41-NEXT:    bsfl %ecx, %ecx
-; SSE41-NEXT:    movd %ecx, %xmm1
-; SSE41-NEXT:    pinsrd $1, %eax, %xmm1
-; SSE41-NEXT:    pextrd $2, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrd $2, %eax, %xmm1
-; SSE41-NEXT:    pextrd $3, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrd $3, %eax, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    psubd %xmm0, %xmm2
+; SSE41-NEXT:    pand %xmm0, %xmm2
+; SSE41-NEXT:    psubd {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pand %xmm3, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pshufb %xmm4, %xmm5
+; SSE41-NEXT:    psrlw $4, %xmm2
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    pshufb %xmm2, %xmm0
+; SSE41-NEXT:    paddb %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE41-NEXT:    psadbw %xmm1, %xmm2
+; SSE41-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT:    psadbw %xmm1, %xmm0
+; SSE41-NEXT:    packuswb %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: testv4i32u:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vmovd %xmm0, %ecx
-; AVX-NEXT:    bsfl %ecx, %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm1
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: testv4i32u:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: testv4i32u:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX2-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %in, i1 -1)
   ret <4 x i32> %out
 }
@@ -390,209 +463,117 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
 ; SSE2-LABEL: testv8i16:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pextrw $7, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %cx
-; SSE2-NEXT:    movw $16, %ax
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    pextrw $3, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm2
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT:    pextrw $5, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm3
-; SSE2-NEXT:    pextrw $1, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT:    pextrw $6, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm2
-; SSE2-NEXT:    pextrw $2, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm3
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm2
-; SSE2-NEXT:    movd %xmm0, %ecx
-; SSE2-NEXT:    bsfw %cx, %cx
-; SSE2-NEXT:    cmovew %ax, %cx
-; SSE2-NEXT:    movd %ecx, %xmm0
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    psubw %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubw %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    paddw %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlw $4, %xmm2
+; SSE2-NEXT:    paddw %xmm1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psllw $8, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm0
+; SSE2-NEXT:    psrlw $8, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv8i16:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pextrw $7, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %cx
-; SSE3-NEXT:    movw $16, %ax
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm1
-; SSE3-NEXT:    pextrw $3, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm2
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE3-NEXT:    pextrw $5, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm3
-; SSE3-NEXT:    pextrw $1, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm1
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE3-NEXT:    pextrw $6, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm2
-; SSE3-NEXT:    pextrw $2, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm3
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm2
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    bsfw %cx, %cx
-; SSE3-NEXT:    cmovew %ax, %cx
-; SSE3-NEXT:    movd %ecx, %xmm0
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    psubw %xmm0, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubw %xmm0, %xmm1
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psrlw $2, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    paddw %xmm2, %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    psrlw $4, %xmm2
+; SSE3-NEXT:    paddw %xmm1, %xmm2
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psllw $8, %xmm0
+; SSE3-NEXT:    paddb %xmm2, %xmm0
+; SSE3-NEXT:    psrlw $8, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv8i16:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pextrw $7, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %cx
-; SSSE3-NEXT:    movw $16, %ax
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    pextrw $3, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm2
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT:    pextrw $5, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm3
-; SSSE3-NEXT:    pextrw $1, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT:    pextrw $6, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm2
-; SSSE3-NEXT:    pextrw $2, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm3
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSSE3-NEXT:    pextrw $4, %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm2
-; SSSE3-NEXT:    movd %xmm0, %ecx
-; SSSE3-NEXT:    bsfw %cx, %cx
-; SSSE3-NEXT:    cmovew %ax, %cx
-; SSSE3-NEXT:    movd %ecx, %xmm0
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    psubw %xmm0, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSSE3-NEXT:    pshufb %xmm2, %xmm4
+; SSSE3-NEXT:    psrlw $4, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    pshufb %xmm1, %xmm3
+; SSSE3-NEXT:    paddb %xmm4, %xmm3
+; SSSE3-NEXT:    movdqa %xmm3, %xmm0
+; SSSE3-NEXT:    psllw $8, %xmm0
+; SSSE3-NEXT:    paddb %xmm3, %xmm0
+; SSSE3-NEXT:    psrlw $8, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv8i16:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrw $1, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %cx
-; SSE41-NEXT:    movw $16, %ax
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    movd %xmm0, %edx
-; SSE41-NEXT:    bsfw %dx, %dx
-; SSE41-NEXT:    cmovew %ax, %dx
-; SSE41-NEXT:    movd %edx, %xmm1
-; SSE41-NEXT:    pinsrw $1, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $2, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $2, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $3, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $3, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $4, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $5, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $5, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $6, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $6, %ecx, %xmm1
-; SSE41-NEXT:    pextrw $7, %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    cmovew %ax, %cx
-; SSE41-NEXT:    pinsrw $7, %ecx, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    psubw %xmm0, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    pand %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pshufb %xmm2, %xmm4
+; SSE41-NEXT:    psrlw $4, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    pshufb %xmm1, %xmm3
+; SSE41-NEXT:    paddb %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    psllw $8, %xmm0
+; SSE41-NEXT:    paddb %xmm3, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv8i16:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %cx
-; AVX-NEXT:    movw $16, %ax
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vmovd %xmm0, %edx
-; AVX-NEXT:    bsfw %dx, %dx
-; AVX-NEXT:    cmovew %ax, %dx
-; AVX-NEXT:    vmovd %edx, %xmm1
-; AVX-NEXT:    vpinsrw $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $2, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $2, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $3, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $3, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $5, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $5, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $6, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $6, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $7, %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    cmovew %ax, %cx
-; AVX-NEXT:    vpinsrw $7, %ecx, %xmm1, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsllw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %in, i1 0)
   ret <8 x i16> %out
@@ -601,164 +582,117 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
 ; SSE2-LABEL: testv8i16u:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pextrw $7, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pextrw $3, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT:    pextrw $5, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pextrw $1, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm3
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT:    pextrw $6, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    pextrw $2, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT:    pextrw $4, %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    bsfw %ax, %ax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    psubw %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubw %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    paddw %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlw $4, %xmm2
+; SSE2-NEXT:    paddw %xmm1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psllw $8, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm0
+; SSE2-NEXT:    psrlw $8, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv8i16u:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pextrw $7, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pextrw $3, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE3-NEXT:    pextrw $5, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pextrw $1, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm3
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE3-NEXT:    pextrw $6, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    pextrw $2, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE3-NEXT:    pextrw $4, %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    bsfw %ax, %ax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    psubw %xmm0, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubw %xmm0, %xmm1
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psrlw $2, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    paddw %xmm2, %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    psrlw $4, %xmm2
+; SSE3-NEXT:    paddw %xmm1, %xmm2
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE3-NEXT:    psllw $8, %xmm0
+; SSE3-NEXT:    paddb %xmm2, %xmm0
+; SSE3-NEXT:    psrlw $8, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv8i16u:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pextrw $7, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pextrw $3, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT:    pextrw $5, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pextrw $1, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm3
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSSE3-NEXT:    pextrw $6, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    pextrw $2, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT:    pextrw $4, %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    movd %xmm0, %eax
-; SSSE3-NEXT:    bsfw %ax, %ax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    psubw %xmm0, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSSE3-NEXT:    pshufb %xmm2, %xmm4
+; SSSE3-NEXT:    psrlw $4, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    pshufb %xmm1, %xmm3
+; SSSE3-NEXT:    paddb %xmm4, %xmm3
+; SSSE3-NEXT:    movdqa %xmm3, %xmm0
+; SSSE3-NEXT:    psllw $8, %xmm0
+; SSSE3-NEXT:    paddb %xmm3, %xmm0
+; SSSE3-NEXT:    psrlw $8, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv8i16u:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrw $1, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    movd %xmm0, %ecx
-; SSE41-NEXT:    bsfw %cx, %cx
-; SSE41-NEXT:    movd %ecx, %xmm1
-; SSE41-NEXT:    pinsrw $1, %eax, %xmm1
-; SSE41-NEXT:    pextrw $2, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $2, %eax, %xmm1
-; SSE41-NEXT:    pextrw $3, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $3, %eax, %xmm1
-; SSE41-NEXT:    pextrw $4, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $4, %eax, %xmm1
-; SSE41-NEXT:    pextrw $5, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $5, %eax, %xmm1
-; SSE41-NEXT:    pextrw $6, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $6, %eax, %xmm1
-; SSE41-NEXT:    pextrw $7, %xmm0, %eax
-; SSE41-NEXT:    bsfw %ax, %ax
-; SSE41-NEXT:    pinsrw $7, %eax, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    psubw %xmm0, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    psubw {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    pand %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pshufb %xmm2, %xmm4
+; SSE41-NEXT:    psrlw $4, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    pshufb %xmm1, %xmm3
+; SSE41-NEXT:    paddb %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    psllw $8, %xmm0
+; SSE41-NEXT:    paddb %xmm3, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv8i16u:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vmovd %xmm0, %ecx
-; AVX-NEXT:    bsfw %cx, %cx
-; AVX-NEXT:    vmovd %ecx, %xmm1
-; AVX-NEXT:    vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX-NEXT:    bsfw %ax, %ax
-; AVX-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsllw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %in, i1 -1)
   ret <8 x i16> %out
@@ -767,579 +701,98 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; SSE2-LABEL: testv16i8:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pushq %rbp
-; SSE2-NEXT:    pushq %r14
-; SSE2-NEXT:    pushq %rbx
-; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    bsfl %eax, %edx
-; SSE2-NEXT:    movl $32, %eax
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    movl $8, %ecx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm0
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT:    bsfl %esi, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm1
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    bsfl %edi, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm2
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSE2-NEXT:    bsfl %ebp, %ebp
-; SSE2-NEXT:    cmovel %eax, %ebp
-; SSE2-NEXT:    cmpl $32, %ebp
-; SSE2-NEXT:    cmovel %ecx, %ebp
-; SSE2-NEXT:    movd %ebp, %xmm0
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT:    bsfl %ebx, %ebx
-; SSE2-NEXT:    cmovel %eax, %ebx
-; SSE2-NEXT:    cmpl $32, %ebx
-; SSE2-NEXT:    cmovel %ecx, %ebx
-; SSE2-NEXT:    movd %ebx, %xmm1
-; SSE2-NEXT:    bsfl %esi, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT:    bsfl %edx, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm3
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT:    bsfl %esi, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm1
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    bsfl %r14d, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm0
-; SSE2-NEXT:    bsfl %edi, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm3
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:    bsfl %r11d, %esi
-; SSE2-NEXT:    cmovel %eax, %esi
-; SSE2-NEXT:    cmpl $32, %esi
-; SSE2-NEXT:    cmovel %ecx, %esi
-; SSE2-NEXT:    movd %esi, %xmm0
-; SSE2-NEXT:    bsfl %edx, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT:    bsfl %r9d, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm0
-; SSE2-NEXT:    bsfl %r10d, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm3
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:    bsfl %r8d, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm4
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT:    bsfl %edx, %edx
-; SSE2-NEXT:    cmovel %eax, %edx
-; SSE2-NEXT:    cmpl $32, %edx
-; SSE2-NEXT:    cmovel %ecx, %edx
-; SSE2-NEXT:    movd %edx, %xmm0
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT:    popq %rbx
-; SSE2-NEXT:    popq %r14
-; SSE2-NEXT:    popq %rbp
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    psubb %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubb %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    paddb %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $4, %xmm0
+; SSE2-NEXT:    paddb %xmm1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv16i8:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pushq %rbp
-; SSE3-NEXT:    pushq %r14
-; SSE3-NEXT:    pushq %rbx
-; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE3-NEXT:    bsfl %eax, %edx
-; SSE3-NEXT:    movl $32, %eax
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    movl $8, %ecx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm0
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE3-NEXT:    bsfl %esi, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm1
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE3-NEXT:    bsfl %edi, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm2
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSE3-NEXT:    bsfl %ebp, %ebp
-; SSE3-NEXT:    cmovel %eax, %ebp
-; SSE3-NEXT:    cmpl $32, %ebp
-; SSE3-NEXT:    cmovel %ecx, %ebp
-; SSE3-NEXT:    movd %ebp, %xmm0
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE3-NEXT:    bsfl %ebx, %ebx
-; SSE3-NEXT:    cmovel %eax, %ebx
-; SSE3-NEXT:    cmpl $32, %ebx
-; SSE3-NEXT:    cmovel %ecx, %ebx
-; SSE3-NEXT:    movd %ebx, %xmm1
-; SSE3-NEXT:    bsfl %esi, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm2
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE3-NEXT:    bsfl %edx, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm3
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE3-NEXT:    bsfl %esi, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm1
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE3-NEXT:    bsfl %r14d, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm0
-; SSE3-NEXT:    bsfl %edi, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm3
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE3-NEXT:    bsfl %r11d, %esi
-; SSE3-NEXT:    cmovel %eax, %esi
-; SSE3-NEXT:    cmpl $32, %esi
-; SSE3-NEXT:    cmovel %ecx, %esi
-; SSE3-NEXT:    movd %esi, %xmm0
-; SSE3-NEXT:    bsfl %edx, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm2
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE3-NEXT:    bsfl %r9d, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm0
-; SSE3-NEXT:    bsfl %r10d, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm3
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE3-NEXT:    bsfl %r8d, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm4
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE3-NEXT:    bsfl %edx, %edx
-; SSE3-NEXT:    cmovel %eax, %edx
-; SSE3-NEXT:    cmpl $32, %edx
-; SSE3-NEXT:    cmovel %ecx, %edx
-; SSE3-NEXT:    movd %edx, %xmm0
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE3-NEXT:    popq %rbx
-; SSE3-NEXT:    popq %r14
-; SSE3-NEXT:    popq %rbp
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    psubb %xmm0, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubb %xmm0, %xmm1
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psrlw $2, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    paddb %xmm2, %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $4, %xmm0
+; SSE3-NEXT:    paddb %xmm1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv16i8:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pushq %rbp
-; SSSE3-NEXT:    pushq %r14
-; SSSE3-NEXT:    pushq %rbx
-; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    bsfl %eax, %edx
-; SSSE3-NEXT:    movl $32, %eax
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    movl $8, %ecx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm0
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSSE3-NEXT:    bsfl %esi, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm1
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %edi, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm2
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSSE3-NEXT:    bsfl %ebp, %ebp
-; SSSE3-NEXT:    cmovel %eax, %ebp
-; SSSE3-NEXT:    cmpl $32, %ebp
-; SSSE3-NEXT:    cmovel %ecx, %ebp
-; SSSE3-NEXT:    movd %ebp, %xmm0
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT:    bsfl %ebx, %ebx
-; SSSE3-NEXT:    cmovel %eax, %ebx
-; SSSE3-NEXT:    cmpl $32, %ebx
-; SSSE3-NEXT:    cmovel %ecx, %ebx
-; SSSE3-NEXT:    movd %ebx, %xmm1
-; SSSE3-NEXT:    bsfl %esi, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm2
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSSE3-NEXT:    bsfl %edx, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm3
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSSE3-NEXT:    bsfl %esi, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm1
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %r14d, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm0
-; SSSE3-NEXT:    bsfl %edi, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm3
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %r11d, %esi
-; SSSE3-NEXT:    cmovel %eax, %esi
-; SSSE3-NEXT:    cmpl $32, %esi
-; SSSE3-NEXT:    cmovel %ecx, %esi
-; SSSE3-NEXT:    movd %esi, %xmm0
-; SSSE3-NEXT:    bsfl %edx, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm2
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSSE3-NEXT:    bsfl %r9d, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm0
-; SSSE3-NEXT:    bsfl %r10d, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm3
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %r8d, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm4
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT:    bsfl %edx, %edx
-; SSSE3-NEXT:    cmovel %eax, %edx
-; SSSE3-NEXT:    cmpl $32, %edx
-; SSSE3-NEXT:    cmovel %ecx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm0
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT:    popq %rbx
-; SSSE3-NEXT:    popq %r14
-; SSSE3-NEXT:    popq %rbp
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    psubb %xmm0, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm3
+; SSSE3-NEXT:    pand %xmm2, %xmm3
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSSE3-NEXT:    pshufb %xmm3, %xmm4
+; SSSE3-NEXT:    psrlw $4, %xmm1
+; SSSE3-NEXT:    pand %xmm2, %xmm1
+; SSSE3-NEXT:    pshufb %xmm1, %xmm0
+; SSSE3-NEXT:    paddb %xmm4, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv16i8:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrb $1, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %edx
-; SSE41-NEXT:    movl $32, %eax
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    movl $8, %ecx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pextrb $0, %xmm0, %esi
-; SSE41-NEXT:    bsfl %esi, %esi
-; SSE41-NEXT:    cmovel %eax, %esi
-; SSE41-NEXT:    cmpl $32, %esi
-; SSE41-NEXT:    cmovel %ecx, %esi
-; SSE41-NEXT:    movd %esi, %xmm1
-; SSE41-NEXT:    pinsrb $1, %edx, %xmm1
-; SSE41-NEXT:    pextrb $2, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $2, %edx, %xmm1
-; SSE41-NEXT:    pextrb $3, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $3, %edx, %xmm1
-; SSE41-NEXT:    pextrb $4, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $4, %edx, %xmm1
-; SSE41-NEXT:    pextrb $5, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $5, %edx, %xmm1
-; SSE41-NEXT:    pextrb $6, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $6, %edx, %xmm1
-; SSE41-NEXT:    pextrb $7, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $7, %edx, %xmm1
-; SSE41-NEXT:    pextrb $8, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $8, %edx, %xmm1
-; SSE41-NEXT:    pextrb $9, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $9, %edx, %xmm1
-; SSE41-NEXT:    pextrb $10, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $10, %edx, %xmm1
-; SSE41-NEXT:    pextrb $11, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $11, %edx, %xmm1
-; SSE41-NEXT:    pextrb $12, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $12, %edx, %xmm1
-; SSE41-NEXT:    pextrb $13, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $13, %edx, %xmm1
-; SSE41-NEXT:    pextrb $14, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $14, %edx, %xmm1
-; SSE41-NEXT:    pextrb $15, %xmm0, %edx
-; SSE41-NEXT:    bsfl %edx, %edx
-; SSE41-NEXT:    cmovel %eax, %edx
-; SSE41-NEXT:    cmpl $32, %edx
-; SSE41-NEXT:    cmovel %ecx, %edx
-; SSE41-NEXT:    pinsrb $15, %edx, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    psubb %xmm0, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pand %xmm2, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pshufb %xmm3, %xmm4
+; SSE41-NEXT:    psrlw $4, %xmm1
+; SSE41-NEXT:    pand %xmm2, %xmm1
+; SSE41-NEXT:    pshufb %xmm1, %xmm0
+; SSE41-NEXT:    paddb %xmm4, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv16i8:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %edx
-; AVX-NEXT:    movl $32, %eax
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    movl $8, %ecx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpextrb $0, %xmm0, %esi
-; AVX-NEXT:    bsfl %esi, %esi
-; AVX-NEXT:    cmovel %eax, %esi
-; AVX-NEXT:    cmpl $32, %esi
-; AVX-NEXT:    cmovel %ecx, %esi
-; AVX-NEXT:    vmovd %esi, %xmm1
-; AVX-NEXT:    vpinsrb $1, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $2, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $2, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $3, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $3, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $4, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $4, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $5, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $5, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $6, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $6, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $7, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $7, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $8, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $8, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $9, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $9, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $10, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $10, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $11, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $11, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $12, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $12, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $13, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $13, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $14, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $14, %edx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $15, %xmm0, %edx
-; AVX-NEXT:    bsfl %edx, %edx
-; AVX-NEXT:    cmovel %eax, %edx
-; AVX-NEXT:    cmpl $32, %edx
-; AVX-NEXT:    cmovel %ecx, %edx
-; AVX-NEXT:    vpinsrb $15, %edx, %xmm1, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -1348,317 +801,98 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; SSE2-LABEL: testv16i8u:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pushq %rbx
-; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT:    bsfl %edx, %edx
-; SSE2-NEXT:    movd %edx, %xmm1
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    bsfl %edi, %edx
-; SSE2-NEXT:    movd %edx, %xmm0
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE2-NEXT:    bsfl %ebx, %ebx
-; SSE2-NEXT:    movd %ebx, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    bsfl %esi, %eax
-; SSE2-NEXT:    movd %eax, %xmm3
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:    bsfl %ecx, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT:    bsfl %ecx, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT:    bsfl %r11d, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm0
-; SSE2-NEXT:    bsfl %edx, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    bsfl %r10d, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm0
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm3
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT:    bsfl %r9d, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    bsfl %edi, %eax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    bsfl %r8d, %eax
-; SSE2-NEXT:    movd %eax, %xmm4
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    bsfl %eax, %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT:    popq %rbx
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    psubb %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psubb %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    paddb %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlw $4, %xmm0
+; SSE2-NEXT:    paddb %xmm1, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv16i8u:
 ; SSE3:       # BB#0:
-; SSE3-NEXT:    pushq %rbx
-; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE3-NEXT:    bsfl %edx, %edx
-; SSE3-NEXT:    movd %edx, %xmm1
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE3-NEXT:    bsfl %edi, %edx
-; SSE3-NEXT:    movd %edx, %xmm0
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE3-NEXT:    bsfl %ebx, %ebx
-; SSE3-NEXT:    movd %ebx, %xmm2
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    bsfl %esi, %eax
-; SSE3-NEXT:    movd %eax, %xmm3
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE3-NEXT:    bsfl %ecx, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE3-NEXT:    bsfl %ecx, %ecx
-; SSE3-NEXT:    movd %ecx, %xmm1
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE3-NEXT:    bsfl %r11d, %ecx
-; SSE3-NEXT:    movd %ecx, %xmm0
-; SSE3-NEXT:    bsfl %edx, %ecx
-; SSE3-NEXT:    movd %ecx, %xmm2
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE3-NEXT:    bsfl %r10d, %ecx
-; SSE3-NEXT:    movd %ecx, %xmm0
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm3
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE3-NEXT:    bsfl %r9d, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    bsfl %edi, %eax
-; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE3-NEXT:    bsfl %r8d, %eax
-; SSE3-NEXT:    movd %eax, %xmm4
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE3-NEXT:    bsfl %eax, %eax
-; SSE3-NEXT:    movd %eax, %xmm0
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE3-NEXT:    popq %rbx
+; SSE3-NEXT:    pxor %xmm1, %xmm1
+; SSE3-NEXT:    psubb %xmm0, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT:    psubb %xmm0, %xmm1
+; SSE3-NEXT:    movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSE3-NEXT:    pand %xmm0, %xmm2
+; SSE3-NEXT:    psrlw $2, %xmm1
+; SSE3-NEXT:    pand %xmm0, %xmm1
+; SSE3-NEXT:    paddb %xmm2, %xmm1
+; SSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSE3-NEXT:    psrlw $4, %xmm0
+; SSE3-NEXT:    paddb %xmm1, %xmm0
+; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv16i8u:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    pushq %rbx
-; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT:    bsfl %edx, %edx
-; SSSE3-NEXT:    movd %edx, %xmm1
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %edi, %edx
-; SSSE3-NEXT:    movd %edx, %xmm0
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSSE3-NEXT:    bsfl %ebx, %ebx
-; SSSE3-NEXT:    movd %ebx, %xmm2
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    bsfl %esi, %eax
-; SSSE3-NEXT:    movd %eax, %xmm3
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %ecx, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT:    bsfl %ecx, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSSE3-NEXT:    bsfl %r11d, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm0
-; SSSE3-NEXT:    bsfl %edx, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm2
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %r10d, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm0
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm3
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSSE3-NEXT:    bsfl %r9d, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    bsfl %edi, %eax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT:    bsfl %r8d, %eax
-; SSSE3-NEXT:    movd %eax, %xmm4
-; SSSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    bsfl %eax, %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT:    popq %rbx
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    psubb %xmm0, %xmm1
+; SSSE3-NEXT:    pand %xmm0, %xmm1
+; SSSE3-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm3
+; SSSE3-NEXT:    pand %xmm2, %xmm3
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSSE3-NEXT:    pshufb %xmm3, %xmm4
+; SSSE3-NEXT:    psrlw $4, %xmm1
+; SSSE3-NEXT:    pand %xmm2, %xmm1
+; SSSE3-NEXT:    pshufb %xmm1, %xmm0
+; SSSE3-NEXT:    paddb %xmm4, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv16i8u:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    pextrb $1, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pextrb $0, %xmm0, %ecx
-; SSE41-NEXT:    bsfl %ecx, %ecx
-; SSE41-NEXT:    movd %ecx, %xmm1
-; SSE41-NEXT:    pinsrb $1, %eax, %xmm1
-; SSE41-NEXT:    pextrb $2, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $2, %eax, %xmm1
-; SSE41-NEXT:    pextrb $3, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $3, %eax, %xmm1
-; SSE41-NEXT:    pextrb $4, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $4, %eax, %xmm1
-; SSE41-NEXT:    pextrb $5, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $5, %eax, %xmm1
-; SSE41-NEXT:    pextrb $6, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $6, %eax, %xmm1
-; SSE41-NEXT:    pextrb $7, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $7, %eax, %xmm1
-; SSE41-NEXT:    pextrb $8, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $8, %eax, %xmm1
-; SSE41-NEXT:    pextrb $9, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $9, %eax, %xmm1
-; SSE41-NEXT:    pextrb $10, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $10, %eax, %xmm1
-; SSE41-NEXT:    pextrb $11, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $11, %eax, %xmm1
-; SSE41-NEXT:    pextrb $12, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $12, %eax, %xmm1
-; SSE41-NEXT:    pextrb $13, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $13, %eax, %xmm1
-; SSE41-NEXT:    pextrb $14, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $14, %eax, %xmm1
-; SSE41-NEXT:    pextrb $15, %xmm0, %eax
-; SSE41-NEXT:    bsfl %eax, %eax
-; SSE41-NEXT:    pinsrb $15, %eax, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    psubb %xmm0, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    psubb {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pand %xmm2, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pshufb %xmm3, %xmm4
+; SSE41-NEXT:    psrlw $4, %xmm1
+; SSE41-NEXT:    pand %xmm2, %xmm1
+; SSE41-NEXT:    pshufb %xmm1, %xmm0
+; SSE41-NEXT:    paddb %xmm4, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv16i8u:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX-NEXT:    bsfl %ecx, %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm1
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX-NEXT:    bsfl %eax, %eax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
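
For reference only (not part of the patch): a minimal scalar C++ sketch of what the updated SSE2 CHECK lines above compute per byte — negate, mask the lowest set bit, subtract one, then popcount via the shift/mask/add steps visible in the psrlw/pand/psubb/paddb sequence. The helper names (popcount8, cttz8) are illustrative assumptions, not anything introduced by this commit.

    // Scalar model of the vectorized lowering checked above, per byte lane.
    #include <cassert>
    #include <cstdint>

    // Bit-twiddling popcount mirroring the SSE2 sequence in the checks.
    static unsigned popcount8(uint8_t v) {
      v = v - ((v >> 1) & 0x55);           // pairwise counts  (psrlw $1 / pand / psubb)
      v = (v & 0x33) + ((v >> 2) & 0x33);  // nibble counts    (pand / psrlw $2 / paddb)
      v = (v + (v >> 4)) & 0x0F;           // byte count       (psrlw $4 / paddb / pand)
      return v;
    }

    // cttz8(x) = popcount((x & -x) - 1); x == 0 wraps to 0xFF and yields 8,
    // matching the defined (non-zero-undef) cttz result for a zero byte.
    static unsigned cttz8(uint8_t x) {
      uint8_t lowbit_mask = (uint8_t)((x & (uint8_t)-x) - 1);
      return popcount8(lowbit_mask);
    }

    int main() {
      assert(cttz8(0x01) == 0);
      assert(cttz8(0x28) == 3);
      assert(cttz8(0x00) == 8);
      return 0;
    }

The SSSE3/SSE41/AVX checks compute the same value but replace the arithmetic popcount with the two nibble pshufb table lookups shown in their CHECK lines.
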

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=248091&r1=248090&r2=248091&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Sat Sep 19 08:22:57 2015
@@ -5,51 +5,50 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX1-LABEL: testv4i64:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    movl $64, %ecx
-; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
-; AVX1-NEXT:    vmovq %xmm1, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
-; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsadbw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i64:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    movl $64, %ecx
-; AVX2-NEXT:    cmoveq %rcx, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vmovq %xmm1, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    cmoveq %rcx, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    cmoveq %rcx, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    cmoveq %rcx, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsadbw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 0)
   ret <4 x i64> %out
@@ -59,41 +58,50 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX1-LABEL: testv4i64u:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
-; AVX1-NEXT:    vmovq %xmm1, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
-; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    bsfq %rax, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsadbw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i64u:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vmovq %xmm1, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    bsfq %rax, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsadbw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 -1)
   ret <4 x i64> %out
@@ -102,80 +110,63 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
 ; AVX1-LABEL: testv8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %ecx
-; AVX1-NEXT:    movl $32, %eax
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vmovd %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    vmovd %edx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vmovd %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    vmovd %edx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    cmovel %eax, %ecx
-; AVX1-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
+; AVX1-NEXT:    vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv8i32:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %ecx
-; AVX2-NEXT:    movl $32, %eax
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vmovd %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    vmovd %edx, %xmm2
-; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrd $1, %xmm0, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vmovd %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    vmovd %edx, %xmm2
-; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $2, %xmm0, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $3, %xmm0, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    cmovel %eax, %ecx
-; AVX2-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX2-NEXT:    vpsadbw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 0)
   ret <8 x i32> %out
@@ -184,62 +175,63 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
 ; AVX1-LABEL: testv8i32u:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
+; AVX1-NEXT:    vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
+; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vpsadbw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv8i32u:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vmovd %xmm1, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vmovd %xmm0, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX2-NEXT:    vpsadbw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 -1)
   ret <8 x i32> %out
@@ -249,143 +241,55 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX1-LABEL: testv16i16:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %cx
-; AVX1-NEXT:    movw $16, %ax
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vmovd %xmm1, %edx
-; AVX1-NEXT:    bsfw %dx, %dx
-; AVX1-NEXT:    cmovew %ax, %dx
-; AVX1-NEXT:    vmovd %edx, %xmm2
-; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $2, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $3, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $4, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $5, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $6, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $7, %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrw $1, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vmovd %xmm0, %edx
-; AVX1-NEXT:    bsfw %dx, %dx
-; AVX1-NEXT:    cmovew %ax, %dx
-; AVX1-NEXT:    vmovd %edx, %xmm2
-; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $2, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $3, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $5, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $6, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $7, %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    cmovew %ax, %cx
-; AVX1-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubw %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $8, %xmm2, %xmm4
+; AVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv16i16:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %cx
-; AVX2-NEXT:    movw $16, %ax
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vmovd %xmm1, %edx
-; AVX2-NEXT:    bsfw %dx, %dx
-; AVX2-NEXT:    cmovew %ax, %dx
-; AVX2-NEXT:    vmovd %edx, %xmm2
-; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $2, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $3, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $4, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $5, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $6, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $7, %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrw $1, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vmovd %xmm0, %edx
-; AVX2-NEXT:    bsfw %dx, %dx
-; AVX2-NEXT:    cmovew %ax, %dx
-; AVX2-NEXT:    vmovd %edx, %xmm2
-; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $2, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $3, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $5, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $6, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $7, %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    cmovew %ax, %cx
-; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $8, %ymm0, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 0)
   ret <16 x i16> %out
@@ -395,109 +299,55 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX1-LABEL: testv16i16u:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $3, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $4, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $5, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    bsfw %cx, %cx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX1-NEXT:    bsfw %ax, %ax
-; AVX1-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubw %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $8, %xmm2, %xmm4
+; AVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv16i16u:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vmovd %xmm1, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $3, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $4, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $5, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vmovd %xmm0, %ecx
-; AVX2-NEXT:    bsfw %cx, %cx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX2-NEXT:    bsfw %ax, %ax
-; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $8, %ymm0, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 -1)
   ret <16 x i16> %out
@@ -507,401 +357,46 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX1-LABEL: testv32i8:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrb $1, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %edx
-; AVX1-NEXT:    movl $32, %eax
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    movl $8, %ecx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpextrb $0, %xmm1, %esi
-; AVX1-NEXT:    bsfl %esi, %esi
-; AVX1-NEXT:    cmovel %eax, %esi
-; AVX1-NEXT:    cmpl $32, %esi
-; AVX1-NEXT:    cmovel %ecx, %esi
-; AVX1-NEXT:    vmovd %esi, %xmm2
-; AVX1-NEXT:    vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $2, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $2, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $3, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $3, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $4, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $4, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $5, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $5, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $6, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $6, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $7, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $7, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $8, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $8, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $9, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $9, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $10, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $10, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $11, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $11, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $12, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $12, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $13, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $13, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $14, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $14, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $15, %xmm1, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $15, %edx, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrb $1, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpextrb $0, %xmm0, %esi
-; AVX1-NEXT:    bsfl %esi, %esi
-; AVX1-NEXT:    cmovel %eax, %esi
-; AVX1-NEXT:    cmpl $32, %esi
-; AVX1-NEXT:    cmovel %ecx, %esi
-; AVX1-NEXT:    vmovd %esi, %xmm2
-; AVX1-NEXT:    vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $2, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $2, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $3, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $3, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $4, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $4, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $5, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $5, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $6, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $6, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $7, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $7, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $8, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $8, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $9, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $9, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $10, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $10, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $11, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $11, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $12, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $12, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $13, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $13, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $14, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $14, %edx, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $15, %xmm0, %edx
-; AVX1-NEXT:    bsfl %edx, %edx
-; AVX1-NEXT:    cmovel %eax, %edx
-; AVX1-NEXT:    cmpl $32, %edx
-; AVX1-NEXT:    cmovel %ecx, %edx
-; AVX1-NEXT:    vpinsrb $15, %edx, %xmm2, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubb %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv32i8:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrb $1, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %edx
-; AVX2-NEXT:    movl $32, %eax
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    movl $8, %ecx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpextrb $0, %xmm1, %esi
-; AVX2-NEXT:    bsfl %esi, %esi
-; AVX2-NEXT:    cmovel %eax, %esi
-; AVX2-NEXT:    cmpl $32, %esi
-; AVX2-NEXT:    cmovel %ecx, %esi
-; AVX2-NEXT:    vmovd %esi, %xmm2
-; AVX2-NEXT:    vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $2, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $2, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $3, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $3, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $4, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $4, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $5, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $5, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $6, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $6, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $7, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $7, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $8, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $8, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $9, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $9, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $10, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $10, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $11, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $11, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $12, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $12, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $13, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $13, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $14, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $14, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $15, %xmm1, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $15, %edx, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrb $1, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpextrb $0, %xmm0, %esi
-; AVX2-NEXT:    bsfl %esi, %esi
-; AVX2-NEXT:    cmovel %eax, %esi
-; AVX2-NEXT:    cmpl $32, %esi
-; AVX2-NEXT:    cmovel %ecx, %esi
-; AVX2-NEXT:    vmovd %esi, %xmm2
-; AVX2-NEXT:    vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $2, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $2, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $3, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $3, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $4, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $4, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $5, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $5, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $6, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $6, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $7, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $7, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $8, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $8, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $9, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $9, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $10, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $10, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $11, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $11, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $12, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $12, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $13, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $13, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $14, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $14, %edx, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $15, %xmm0, %edx
-; AVX2-NEXT:    bsfl %edx, %edx
-; AVX2-NEXT:    cmovel %eax, %edx
-; AVX2-NEXT:    cmpl $32, %edx
-; AVX2-NEXT:    cmovel %ecx, %edx
-; AVX2-NEXT:    vpinsrb $15, %edx, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -911,205 +406,46 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX1-LABEL: testv32i8u:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrb $1, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpextrb $0, %xmm1, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $2, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $3, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $4, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $5, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $6, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $7, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $8, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $9, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $10, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $11, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $12, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $13, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $14, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $15, %xmm1, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT:    bsfl %ecx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX1-NEXT:    bsfl %eax, %eax
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubb %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv32i8u:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrb $1, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpextrb $0, %xmm1, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $2, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $3, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $4, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $5, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $6, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $7, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $8, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $9, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $10, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $11, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $12, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $13, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $14, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $15, %xmm1, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT:    bsfl %ecx, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm2
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX2-NEXT:    bsfl %eax, %eax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
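
For reference, a minimal standalone IR reproducer for the new 256-bit lowering is sketched below; the function name @cttz_demo and the llc invocation are illustrative only and not part of this commit. Running it through something like llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 should regenerate the vpsub/vpand/vpshufb sequences checked in the hunks above:

  ; Generic cttz intrinsic, same form as the calls used in these tests.
  declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)

  define <8 x i32> @cttz_demo(<8 x i32> %v) nounwind {
    ; i1 0: the result is defined for zero inputs (each zero lane yields 32).
    %r = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %v, i1 0)
    ret <8 x i32> %r
  }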

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll?rev=248091&r1=248090&r2=248091&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll Sat Sep 19 08:22:57 2015
@@ -45,40 +45,12 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
 ; ALL-LABEL: testv8i64u:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; ALL-NEXT:    vpextrq $1, %xmm1, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm2
-; ALL-NEXT:    vmovq %xmm1, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm1
-; ALL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; ALL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; ALL-NEXT:    vpextrq $1, %xmm2, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm3
-; ALL-NEXT:    vmovq %xmm2, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm2
-; ALL-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; ALL-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
-; ALL-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
-; ALL-NEXT:    vpextrq $1, %xmm2, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm3
-; ALL-NEXT:    vmovq %xmm2, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm2
-; ALL-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; ALL-NEXT:    vpextrq $1, %xmm0, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm3
-; ALL-NEXT:    vmovq %xmm0, %rax
-; ALL-NEXT:    tzcntq %rax, %rax
-; ALL-NEXT:    vmovq %rax, %xmm0
-; ALL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
+; ALL-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; ALL-NEXT:    vplzcntq %zmm0, %zmm0
+; ALL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm1
+; ALL-NEXT:    vpsubq %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
   %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 -1)
   ret <8 x i64> %out
@@ -149,60 +121,12 @@ define <16 x i32> @testv16i32(<16 x i32>
 define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
 ; ALL-LABEL: testv16i32u:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; ALL-NEXT:    vpextrd $1, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm2
-; ALL-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; ALL-NEXT:    vpextrd $2, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; ALL-NEXT:    vpextrd $3, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; ALL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; ALL-NEXT:    vpextrd $1, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $2, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $3, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
-; ALL-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
-; ALL-NEXT:    vpextrd $1, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $2, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $3, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrd $1, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vmovd %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $2, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrd $3, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm0
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
+; ALL-NEXT:    vpandd %zmm1, %zmm0, %zmm0
+; ALL-NEXT:    vplzcntd %zmm0, %zmm0
+; ALL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm1
+; ALL-NEXT:    vpsubd %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
   %out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 -1)
   ret <16 x i32> %out
@@ -211,106 +135,34 @@ define <16 x i32> @testv16i32u(<16 x i32
 define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
 ; ALL-LABEL: testv32i16:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm0, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
+; ALL-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vpsubw %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm5
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; ALL-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
+; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; ALL-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
+; ALL-NEXT:    vpaddb %ymm5, %ymm0, %ymm0
+; ALL-NEXT:    vpsllw $8, %ymm0, %ymm5
+; ALL-NEXT:    vpaddb %ymm0, %ymm5, %ymm0
+; ALL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; ALL-NEXT:    vpsubw %ymm1, %ymm2, %ymm2
+; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm2
+; ALL-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
+; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; ALL-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; ALL-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsllw $8, %ymm1, %ymm2
+; ALL-NEXT:    vpaddb %ymm1, %ymm2, %ymm1
+; ALL-NEXT:    vpsrlw $8, %ymm1, %ymm1
 ; ALL-NEXT:    retq
   %out = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %in, i1 0)
   ret <32 x i16> %out
@@ -319,106 +171,34 @@ define <32 x i16> @testv32i16(<32 x i16>
 define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
 ; ALL-LABEL: testv32i16u:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm0, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm0, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm2, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm2, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrw $1, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    tzcntw %cx, %cx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $2, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $3, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $4, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $5, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $6, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrw $7, %xmm1, %eax
-; ALL-NEXT:    tzcntw %ax, %ax
-; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
+; ALL-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vpsubw %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm5
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; ALL-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
+; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; ALL-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
+; ALL-NEXT:    vpaddb %ymm5, %ymm0, %ymm0
+; ALL-NEXT:    vpsllw $8, %ymm0, %ymm5
+; ALL-NEXT:    vpaddb %ymm0, %ymm5, %ymm0
+; ALL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; ALL-NEXT:    vpsubw %ymm1, %ymm2, %ymm2
+; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm2
+; ALL-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
+; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; ALL-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; ALL-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsllw $8, %ymm1, %ymm2
+; ALL-NEXT:    vpaddb %ymm1, %ymm2, %ymm1
+; ALL-NEXT:    vpsrlw $8, %ymm1, %ymm1
 ; ALL-NEXT:    retq
   %out = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %in, i1 -1)
   ret <32 x i16> %out
@@ -427,331 +207,28 @@ define <32 x i16> @testv32i16u(<32 x i16
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; ALL-LABEL: testv64i8:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    movl $8, %eax
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpextrb $0, %xmm2, %edx
-; ALL-NEXT:    tzcntl %edx, %edx
-; ALL-NEXT:    cmpl $32, %edx
-; ALL-NEXT:    cmovel %eax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $2, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $3, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $4, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $5, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $6, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $7, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $8, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $9, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $10, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $11, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $12, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $13, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $14, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $15, %ecx, %xmm3, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpextrb $0, %xmm0, %edx
-; ALL-NEXT:    tzcntl %edx, %edx
-; ALL-NEXT:    cmpl $32, %edx
-; ALL-NEXT:    cmovel %eax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $2, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $3, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $4, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $5, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $6, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $7, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $8, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $9, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $10, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $11, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $12, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $13, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $14, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $15, %ecx, %xmm3, %xmm0
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpextrb $0, %xmm2, %edx
-; ALL-NEXT:    tzcntl %edx, %edx
-; ALL-NEXT:    cmpl $32, %edx
-; ALL-NEXT:    cmovel %eax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $2, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $3, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $4, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $5, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $6, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $7, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $8, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $9, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $10, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $11, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $12, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $13, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $14, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $15, %ecx, %xmm3, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpextrb $0, %xmm1, %edx
-; ALL-NEXT:    tzcntl %edx, %edx
-; ALL-NEXT:    cmpl $32, %edx
-; ALL-NEXT:    cmovel %eax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $2, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $3, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $4, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $5, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $6, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $7, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $8, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $9, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $10, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $11, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $12, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $13, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $14, %ecx, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    cmpl $32, %ecx
-; ALL-NEXT:    cmovel %eax, %ecx
-; ALL-NEXT:    vpinsrb $15, %ecx, %xmm3, %xmm1
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
+; ALL-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm5
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; ALL-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
+; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; ALL-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
+; ALL-NEXT:    vpaddb %ymm5, %ymm0, %ymm0
+; ALL-NEXT:    vpsubb %ymm1, %ymm2, %ymm2
+; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm2
+; ALL-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
+; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; ALL-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; ALL-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
 ; ALL-NEXT:    retq
   %out = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -760,202 +237,28 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; ALL-LABEL: testv64i8u:
 ; ALL:       ## BB#0:
-; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpextrb $0, %xmm0, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm0, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm0
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm2, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT:    vpextrb $1, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpextrb $0, %xmm1, %ecx
-; ALL-NEXT:    tzcntl %ecx, %ecx
-; ALL-NEXT:    vmovd %ecx, %xmm3
-; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $2, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $3, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $4, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $5, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $6, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $7, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $8, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $9, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $10, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $11, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $12, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $13, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $14, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT:    vpextrb $15, %xmm1, %eax
-; ALL-NEXT:    tzcntl %eax, %eax
-; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm1
-; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
+; ALL-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm5
+; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; ALL-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
+; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; ALL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; ALL-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
+; ALL-NEXT:    vpaddb %ymm5, %ymm0, %ymm0
+; ALL-NEXT:    vpsubb %ymm1, %ymm2, %ymm2
+; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; ALL-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm2
+; ALL-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
+; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; ALL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; ALL-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; ALL-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
 ; ALL-NEXT:    retq
   %out = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
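
The vectorized output above replaces the per-element tzcnt scalarization with the
identity cttz(x) = ctpop((x & -x) - 1), with the popcount done via the nibble
vpshufb lookup table visible in the checks. A minimal scalar sketch of that
identity for 16-bit elements (not part of the patch; the helper names
popcount16/cttz16 are purely illustrative):

// Sketch only: cttz(x) == ctpop((x & -x) - 1).  For x != 0, (x & -x) isolates
// the lowest set bit, subtracting 1 turns it into a mask of the trailing-zero
// bits, and counting those bits gives the trailing-zero count.  For x == 0 the
// mask is all-ones, so the result is the element width, which matches the
// defined (non-zero-undef) cttz semantics.
#include <cassert>
#include <cstdint>

static unsigned popcount16(uint16_t V) {
  unsigned N = 0;
  for (; V; V &= V - 1) // clear the lowest set bit each iteration
    ++N;
  return N;
}

static unsigned cttz16(uint16_t X) {
  uint16_t LowBit = static_cast<uint16_t>(X & (0u - X));  // isolate lowest set bit
  return popcount16(static_cast<uint16_t>(LowBit - 1u));  // count the bits below it
}

int main() {
  assert(cttz16(0x0001) == 0);
  assert(cttz16(0x0050) == 4);
  assert(cttz16(0x8000) == 15);
  assert(cttz16(0x0000) == 16); // zero input yields the element width
  return 0;
}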

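For the word element types the byte-wise popcount still has to be widened into
per-word counts, which the v32i16 checks above do with the vpsllw $8 / vpaddb /
vpsrlw $8 sequence. A small scalar sketch of that pair-summing step (again
illustrative only; it assumes each byte count is at most 8, as a byte popcount is):

// Sketch only: for a 16-bit lane whose two bytes hold the per-byte counts,
// ((v << 8) + v) >> 8 sums the pair into a per-word count.
#include <cassert>
#include <cstdint>

static uint16_t wordCountFromByteCounts(uint16_t ByteCounts) {
  // Each byte count is at most 8, so the plain 16-bit add below behaves the
  // same as the byte-wise vpaddb (there is no carry between the bytes).
  uint16_t Shifted = static_cast<uint16_t>(ByteCounts << 8);      // vpsllw $8
  uint16_t Summed  = static_cast<uint16_t>(Shifted + ByteCounts); // vpaddb
  return static_cast<uint16_t>(Summed >> 8);                      // vpsrlw $8
}

int main() {
  // Byte counts 3 (low byte) and 5 (high byte) -> word count 8.
  assert(wordCountFromByteCounts(static_cast<uint16_t>((5 << 8) | 3)) == 8);
  // Byte counts 8 and 8 -> word count 16 (the all-ones mask for x == 0).
  assert(wordCountFromByteCounts(static_cast<uint16_t>((8 << 8) | 8)) == 16);
  return 0;
}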

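For the zero-undef i32/i64 cases on AVX512CD, the vplzcntd sequence above instead
isolates the lowest set bit and converts its leading-zero count into a trailing-zero
count, i.e. cttz_undef(x) = (width - 1) - ctlz(x & -x); the broadcast constant is
the element width minus one. A scalar sketch for 32-bit elements (illustrative
only, and valid only for non-zero inputs, as the *_ZERO_UNDEF node permits):

// Sketch only: cttz_undef(x) == 31 - ctlz(x & -x) for non-zero 32-bit x.
#include <cassert>
#include <cstdint>

static unsigned ctlz32(uint32_t V) {
  unsigned N = 0;
  for (uint32_t Bit = 0x80000000u; Bit && !(V & Bit); Bit >>= 1)
    ++N;
  return N;
}

static unsigned cttz_undef32(uint32_t X) {
  assert(X != 0 && "result is undefined for zero input");
  uint32_t LowBit = X & (0u - X); // isolate the lowest set bit
  return 31u - ctlz32(LowBit);    // 31 - lzcnt gives that bit's index
}

int main() {
  assert(cttz_undef32(0x00000001u) == 0);
  assert(cttz_undef32(0x00500000u) == 20);
  assert(cttz_undef32(0x80000000u) == 31);
  return 0;
}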