[llvm] [X86] Lower CTTZ/CTLZ vXi8 vectors using GF2P8AFFINEQB (PR #118012)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 28 06:58:28 PST 2024


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/118012

From 6b5dae9d78da0d5db5f7a3697e0039ba98c9cec6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 28 Nov 2024 14:40:31 +0000
Subject: [PATCH] [X86] Lower CTTZ/CTLZ vXi8 vectors using GF2P8AFFINEQB

CTTZ can be lowered using GF2P8AFFINEQB if we first isolate the lowest set bit (leaving a zero input as zero) and then use GF2P8AFFINEQB to perform a lookup on the result

With CTTZ in place, CTLZ can then be lowered as CTTZ(BITREVERSE())

As discussed on #110308
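
For reference, a minimal scalar model of the trick (not part of the patch;
gf2p8affine_byte/cttz8/ctlz8 are hypothetical helper names, and the affine
semantics follow the Intel SDM definition of GF2P8AFFINEQB, where result
bit i is parity(matrix byte (7-i) AND src) XOR imm8 bit i):

  #include <cstdint>

  // Affine transform of one byte, per the GF2P8AFFINEQB definition.
  static uint8_t gf2p8affine_byte(uint64_t Matrix, uint8_t X, uint8_t Imm8) {
    uint8_t R = 0;
    for (int I = 0; I != 8; ++I) {
      uint8_t Row = (Matrix >> (8 * (7 - I))) & 0xFF;
      R |= (__builtin_parity(Row & X) ^ ((Imm8 >> I) & 1)) << I;
    }
    return R;
  }

  // CTTZ: isolate the lowest set bit (zero stays zero), then the
  // 0xAACCF0FF00000000 matrix with imm8 = 8 maps 2^k -> k and 0 -> 8.
  static uint8_t cttz8(uint8_t X) {
    uint8_t Lsb = X & (uint8_t)-X;
    return gf2p8affine_byte(0xAACCF0FF00000000ULL, Lsb, 0x08);
  }

  // CTLZ: bit-reverse first (the existing 0x8040201008040201 matrix) so the
  // highest set bit becomes the lowest, then reuse the CTTZ lookup.
  static uint8_t ctlz8(uint8_t X) {
    return cttz8(gf2p8affine_byte(0x8040201008040201ULL, X, 0x00));
  }

The vectorized lowering below does the same per byte: psubb/pand isolate the
lowest set bit and a single gf2p8affineqb with imm8 = 8 performs the lookup,
as the updated tests show.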
---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 102 ++--
 llvm/test/CodeGen/X86/gfni-lzcnt.ll     | 562 ++++++++-------------
 llvm/test/CodeGen/X86/gfni-tzcnt.ll     | 622 +++++++-----------------
 3 files changed, 441 insertions(+), 845 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d490de06590f78..9d7146a18ffe80 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1330,6 +1330,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
     setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
     setOperationAction(ISD::BITREVERSE, MVT::i64, Custom);
+    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
+    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
   }
 
   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
@@ -1695,6 +1697,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
         setOperationAction(ISD::MGATHER,  VT, Custom);
     }
+
+    if (Subtarget.hasGFNI()) {
+      setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
+      setOperationAction(ISD::CTTZ, MVT::v32i8, Custom);
+    }
   }
 
   if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
@@ -2079,6 +2086,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
     setOperationAction(ISD::FABS, MVT::v32f16, Custom);
     setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
+
+    if (Subtarget.hasGFNI()) {
+      setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
+      setOperationAction(ISD::CTTZ, MVT::v64i8, Custom);
+    }
   }// useAVX512Regs
 
   if (!Subtarget.useSoftFloat() && Subtarget.hasVBMI2()) {
@@ -28412,6 +28424,46 @@ SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op,
   return createSetFPEnvNodes(Env, Chain, DL, MVT::i32, MMO, DAG, Subtarget);
 }
 
+// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate/cttz.
+uint64_t getGFNICtrlImm(unsigned Opcode, unsigned Amt = 0) {
+  assert((Amt < 8) && "Shift/Rotation amount out of range");
+  switch (Opcode) {
+  case ISD::BITREVERSE:
+    return 0x8040201008040201ULL;
+  case ISD::CTTZ:
+    // Special case - only works for zero/single bit input.
+    return 0xAACCF0FF00000000ULL;
+  case ISD::SHL:
+    return ((0x0102040810204080ULL >> (Amt)) &
+            (0x0101010101010101ULL * (0xFF >> (Amt))));
+  case ISD::SRL:
+    return ((0x0102040810204080ULL << (Amt)) &
+            (0x0101010101010101ULL * ((0xFF << (Amt)) & 0xFF)));
+  case ISD::SRA:
+    return (getGFNICtrlImm(ISD::SRL, Amt) |
+            (0x8080808080808080ULL >> (64 - (8 * Amt))));
+  case ISD::ROTL:
+    return getGFNICtrlImm(ISD::SRL, 8 - Amt) | getGFNICtrlImm(ISD::SHL, Amt);
+  case ISD::ROTR:
+    return getGFNICtrlImm(ISD::SHL, 8 - Amt) | getGFNICtrlImm(ISD::SRL, Amt);
+  }
+  llvm_unreachable("Unsupported GFNI opcode");
+}
+
+// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate/cttz.
+SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL,
+                        MVT VT, unsigned Amt = 0) {
+  assert(VT.getVectorElementType() == MVT::i8 &&
+         (VT.getSizeInBits() % 64) == 0 && "Illegal GFNI control type");
+  uint64_t Imm = getGFNICtrlImm(Opcode, Amt);
+  SmallVector<SDValue> MaskBits;
+  for (unsigned I = 0, E = VT.getSizeInBits(); I != E; I += 8) {
+    uint64_t Bits = (Imm >> (I % 64)) & 255;
+    MaskBits.push_back(DAG.getConstant(Bits, DL, MVT::i8));
+  }
+  return DAG.getBuildVector(VT, DL, MaskBits);
+}
+
 /// Lower a vector CTLZ using native supported vector CTLZ instruction.
 //
 // i8/i16 vector implemented using dword LZCNT vector instruction
@@ -28535,6 +28587,11 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
                                SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
 
+  // GFNI targets - fold as cttz(bitreverse())
+  if (Subtarget.hasGFNI() && VT.getVectorElementType() == MVT::i8)
+    return DAG.getNode(ISD::CTTZ, DL, VT,
+                       DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0)));
+
   if (Subtarget.hasCDI() &&
       // vXi8 vectors need to be promoted to 512-bits for vXi32.
       (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
@@ -28598,6 +28655,14 @@ static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
   SDValue N0 = Op.getOperand(0);
   SDLoc dl(Op);
 
+  // GFNI - isolate LSB and perform GF2P8AFFINEQB lookup.
+  if (Subtarget.hasGFNI() && VT.isVector()) {
+    SDValue B = DAG.getNode(ISD::AND, dl, VT, N0, DAG.getNegative(N0, dl, VT));
+    SDValue M = getGFNICtrlMask(ISD::CTTZ, DAG, dl, VT);
+    return DAG.getNode(X86ISD::GF2P8AFFINEQB, dl, VT, B, M,
+                       DAG.getTargetConstant(0x8, dl, MVT::i8));
+  }
+
   assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
          "Only scalar CTTZ requires custom lowering");
 
@@ -29597,43 +29662,6 @@ SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
   return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
 }
 
-// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate.
-uint64_t getGFNICtrlImm(unsigned Opcode, unsigned Amt = 0) {
-  assert((Amt < 8) && "Shift/Rotation amount out of range");
-  switch (Opcode) {
-  case ISD::BITREVERSE:
-    return 0x8040201008040201ULL;
-  case ISD::SHL:
-    return ((0x0102040810204080ULL >> (Amt)) &
-            (0x0101010101010101ULL * (0xFF >> (Amt))));
-  case ISD::SRL:
-    return ((0x0102040810204080ULL << (Amt)) &
-            (0x0101010101010101ULL * ((0xFF << (Amt)) & 0xFF)));
-  case ISD::SRA:
-    return (getGFNICtrlImm(ISD::SRL, Amt) |
-            (0x8080808080808080ULL >> (64 - (8 * Amt))));
-  case ISD::ROTL:
-    return getGFNICtrlImm(ISD::SRL, 8 - Amt) | getGFNICtrlImm(ISD::SHL, Amt);
-  case ISD::ROTR:
-    return getGFNICtrlImm(ISD::SHL, 8 - Amt) | getGFNICtrlImm(ISD::SRL, Amt);
-  }
-  llvm_unreachable("Unsupported GFNI opcode");
-}
-
-// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate.
-SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL, MVT VT,
-                        unsigned Amt = 0) {
-  assert(VT.getVectorElementType() == MVT::i8 &&
-         (VT.getSizeInBits() % 64) == 0 && "Illegal GFNI control type");
-  uint64_t Imm = getGFNICtrlImm(Opcode, Amt);
-  SmallVector<SDValue> MaskBits;
-  for (unsigned I = 0, E = VT.getSizeInBits(); I != E; I += 8) {
-    uint64_t Bits = (Imm >> (I % 64)) & 255;
-    MaskBits.push_back(DAG.getConstant(Bits, DL, MVT::i8));
-  }
-  return DAG.getBuildVector(VT, DL, MaskBits);
-}
-
 // Return true if the required (according to Opcode) shift-imm form is natively
 // supported by the Subtarget
 static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
diff --git a/llvm/test/CodeGen/X86/gfni-lzcnt.ll b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
index e84af84b36aa9e..5e7894d821d48f 100644
--- a/llvm/test/CodeGen/X86/gfni-lzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
@@ -8,40 +8,29 @@
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm2
 ; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pxor %xmm3, %xmm3
-; GFNISSE-NEXT:    pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8:
 ; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -50,40 +39,29 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm2
 ; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pxor %xmm3, %xmm3
-; GFNISSE-NEXT:    pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8u:
 ; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
@@ -92,73 +70,52 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm6
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm3, %xmm0
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT:    pand %xmm3, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    psubb %xmm0, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT:    psubb %xmm1, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -167,73 +124,52 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm6
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm3, %xmm0
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT:    pand %xmm3, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    psubb %xmm0, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT:    psubb %xmm1, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
@@ -242,132 +178,93 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm4
-; GFNISSE-NEXT:    movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm7
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    paddb %xmm8, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm1
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm0, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm1, %xmm7
 ; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    paddb %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm2, %xmm7
 ; GFNISSE-NEXT:    pand %xmm7, %xmm2
-; GFNISSE-NEXT:    paddb %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT:    pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm3
-; GFNISSE-NEXT:    paddb %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT:    psubb %xmm3, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm4, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm5, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8:
 ; GFNIAVX512VL:       # %bb.0:
 ; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; GFNIAVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vptestnmb %zmm1, %zmm1, %k0
-; GFNIAVX512BW-NEXT:    vpmovm2b %k0, %zmm1
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpaddb %zmm3, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -376,132 +273,93 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm4
-; GFNISSE-NEXT:    movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm7
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    paddb %xmm8, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm1
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm0, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm1, %xmm7
 ; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    paddb %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm2, %xmm7
 ; GFNISSE-NEXT:    pand %xmm7, %xmm2
-; GFNISSE-NEXT:    paddb %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT:    pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm3
-; GFNISSE-NEXT:    paddb %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT:    psubb %xmm3, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm4, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm5, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8u:
 ; GFNIAVX512VL:       # %bb.0:
 ; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; GFNIAVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vptestnmb %zmm1, %zmm1, %k0
-; GFNIAVX512BW-NEXT:    vpmovm2b %k0, %zmm1
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpaddb %zmm3, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
diff --git a/llvm/test/CodeGen/X86/gfni-tzcnt.ll b/llvm/test/CodeGen/X86/gfni-tzcnt.ll
index f424483c53e2ce..533243f49250d4 100644
--- a/llvm/test/CodeGen/X86/gfni-tzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-tzcnt.ll
@@ -8,44 +8,26 @@
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; GFNISSE-NEXT:    paddb %xmm0, %xmm1
-; GFNISSE-NEXT:    pandn %xmm1, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    pand %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8:
 ; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -54,44 +36,26 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; GFNISSE-NEXT:    paddb %xmm0, %xmm1
-; GFNISSE-NEXT:    pandn %xmm1, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    pand %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8u:
 ; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
@@ -100,84 +64,42 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm2
-; GFNISSE-NEXT:    paddb %xmm4, %xmm2
-; GFNISSE-NEXT:    pandn %xmm2, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm5, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm3, %xmm6
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm7 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm7, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    paddb %xmm6, %xmm3
-; GFNISSE-NEXT:    paddb %xmm1, %xmm4
-; GFNISSE-NEXT:    pandn %xmm4, %xmm1
-; GFNISSE-NEXT:    pand %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm0
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm0
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm7, %xmm1
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    pxor %xmm2, %xmm2
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    psubb %xmm0, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm3 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm3, %xmm0
+; GFNISSE-NEXT:    psubb %xmm1, %xmm2
+; GFNISSE-NEXT:    pand %xmm2, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm3, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpandn %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX1-NEXT:    vpand %xmm3, %xmm1, %xmm4
-; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm6 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm6, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpandn %xmm2, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpand %xmm3, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm6, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -186,84 +108,42 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm2
-; GFNISSE-NEXT:    paddb %xmm4, %xmm2
-; GFNISSE-NEXT:    pandn %xmm2, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm5, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm3, %xmm6
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm7 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm7, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    paddb %xmm6, %xmm3
-; GFNISSE-NEXT:    paddb %xmm1, %xmm4
-; GFNISSE-NEXT:    pandn %xmm4, %xmm1
-; GFNISSE-NEXT:    pand %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm0
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm0
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm7, %xmm1
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    pxor %xmm2, %xmm2
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    psubb %xmm0, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm3 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm3, %xmm0
+; GFNISSE-NEXT:    psubb %xmm1, %xmm2
+; GFNISSE-NEXT:    pand %xmm2, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm3, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpandn %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX1-NEXT:    vpand %xmm3, %xmm1, %xmm4
-; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm6 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm6, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpandn %xmm2, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpand %xmm3, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm6, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
@@ -272,157 +152,72 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm1
-; GFNISSE-NEXT:    pcmpeqd %xmm6, %xmm6
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm9
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm0
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm0
-; GFNISSE-NEXT:    paddb %xmm9, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm1
-; GFNISSE-NEXT:    paddb %xmm6, %xmm1
-; GFNISSE-NEXT:    pandn %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm1
-; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm9
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm1
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm1
-; GFNISSE-NEXT:    paddb %xmm9, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm5
-; GFNISSE-NEXT:    paddb %xmm6, %xmm5
-; GFNISSE-NEXT:    pandn %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm9
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm5
-; GFNISSE-NEXT:    paddb %xmm9, %xmm5
-; GFNISSE-NEXT:    paddb %xmm3, %xmm6
-; GFNISSE-NEXT:    pandn %xmm6, %xmm3
-; GFNISSE-NEXT:    pand %xmm3, %xmm7
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm7, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm3, %xmm4
-; GFNISSE-NEXT:    paddb %xmm2, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    pxor %xmm5, %xmm5
+; GFNISSE-NEXT:    psubb %xmm0, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm5 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm0
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm1, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm1
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm2, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm2
+; GFNISSE-NEXT:    psubb %xmm3, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm4
-; GFNIAVX1-NEXT:    vpandn %xmm4, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm7 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm7 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vpandn %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vpandn %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpandn %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm4
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; GFNIAVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm3, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; GFNIAVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpandn %ymm3, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm0, %ymm4
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX2-NEXT:    # ymm5 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
+; GFNIAVX2-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm3, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm2, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm3, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8:
 ; GFNIAVX512VL:       # %bb.0:
 ; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm1, %ymm3
-; GFNIAVX512VL-NEXT:    vpandn %ymm3, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX512VL-NEXT:    vpand %ymm3, %ymm1, %ymm4
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512VL-NEXT:    # ymm5 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX512VL-NEXT:    vpandn %ymm2, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpand %ymm3, %ymm0, %ymm2
-; GFNIAVX512VL-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm2, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; GFNIAVX512VL-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vpandnq %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -431,157 +226,72 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm1
-; GFNISSE-NEXT:    pcmpeqd %xmm6, %xmm6
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm9
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm0
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm0
-; GFNISSE-NEXT:    paddb %xmm9, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm1
-; GFNISSE-NEXT:    paddb %xmm6, %xmm1
-; GFNISSE-NEXT:    pandn %xmm1, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm1
-; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm9
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm1
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm1
-; GFNISSE-NEXT:    paddb %xmm9, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm5
-; GFNISSE-NEXT:    paddb %xmm6, %xmm5
-; GFNISSE-NEXT:    pandn %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm9
-; GFNISSE-NEXT:    pshufb %xmm5, %xmm9
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm5
-; GFNISSE-NEXT:    paddb %xmm9, %xmm5
-; GFNISSE-NEXT:    paddb %xmm3, %xmm6
-; GFNISSE-NEXT:    pandn %xmm6, %xmm3
-; GFNISSE-NEXT:    pand %xmm3, %xmm7
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm7, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm3, %xmm4
-; GFNISSE-NEXT:    paddb %xmm2, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    pxor %xmm5, %xmm5
+; GFNISSE-NEXT:    psubb %xmm0, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm5 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm0
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm1, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm1
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm2, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm2
+; GFNISSE-NEXT:    psubb %xmm3, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm5, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
 ; GFNIAVX1:       # %bb.0:
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm4
-; GFNIAVX1-NEXT:    vpandn %xmm4, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm7 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm7 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vpandn %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vpandn %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm2, %xmm5
-; GFNIAVX1-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpandn %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm1, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm7, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm4
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; GFNIAVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm3, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; GFNIAVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpandn %ymm3, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm0, %ymm4
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX2-NEXT:    # ymm5 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
+; GFNIAVX2-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170,0,0,0,0,255,240,204,170]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm3, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm2, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm3, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8u:
 ; GFNIAVX512VL:       # %bb.0:
 ; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm1, %ymm3
-; GFNIAVX512VL-NEXT:    vpandn %ymm3, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; GFNIAVX512VL-NEXT:    vpand %ymm3, %ymm1, %ymm4
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512VL-NEXT:    # ymm5 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX512VL-NEXT:    vpandn %ymm2, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpand %ymm3, %ymm0, %ymm2
-; GFNIAVX512VL-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm6, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm2, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; GFNIAVX512VL-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vpandnq %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
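
For anyone wanting to sanity-check the new tables without firing up llc: below is a minimal scalar sketch of what the updated checks encode, namely isolate the lowest set bit with psubb+pand, then one gf2p8affineqb with immediate 8 against the repeated byte pattern [0,0,0,0,255,240,204,170] (the qword 0xAACCF0FF00000000). The helper names are mine, and the bit/byte ordering follows my reading of the Intel SDM pseudocode for GF2P8AFFINEQB, so treat it as a model rather than the in-tree implementation.

#include <cstdint>
#include <cstdio>

// Scalar model of one byte lane of GF2P8AFFINEQB: per the SDM, result
// bit i is parity(matrix.byte[7-i] AND src) XOR imm8.bit[i].
static uint8_t gf2p8affine_byte(uint64_t matrix, uint8_t src, uint8_t imm8) {
  uint8_t out = 0;
  for (int i = 0; i < 8; ++i) {
    uint8_t row = uint8_t(matrix >> (8 * (7 - i)));
    uint8_t bit =
        uint8_t((__builtin_popcount(row & src) & 1) ^ ((imm8 >> i) & 1));
    out |= uint8_t(bit << i);
  }
  return out;
}

// cttz for one byte, mirroring the generated sequence: x & -x isolates
// the lowest set bit (the psubb + pand in the checks), then a single
// affine transform maps 1<<k to k, and maps 0 to 8 via the immediate.
static uint8_t cttz_byte(uint8_t x) {
  uint8_t lsb = uint8_t(x & -x);
  return gf2p8affine_byte(0xAACCF0FF00000000ULL, lsb, 8);
}

int main() {
  for (int x = 0; x < 256; ++x) {
    uint8_t expect = 8;
    for (int k = 0; k < 8; ++k)
      if (x & (1 << k)) { expect = uint8_t(k); break; }
    if (cttz_byte(uint8_t(x)) != expect) {
      printf("mismatch at %d\n", x);
      return 1;
    }
  }
  printf("all 256 inputs OK\n");
}

And the same thing through the real instruction, for anyone on GFNI hardware (a hypothetical helper of mine, compile with e.g. clang -mgfni):

#include <immintrin.h>

__m128i cttz_v16i8(__m128i x) {
  // x & (0 - x) keeps only the lowest set bit in each byte.
  __m128i lsb = _mm_and_si128(x, _mm_sub_epi8(_mm_setzero_si128(), x));
  return _mm_gf2p8affine_epi64_epi8(
      lsb, _mm_set1_epi64x((long long)0xAACCF0FF00000000ULL), 8);
}

Since a zero byte comes out as 8 either way, the defined (i1 0) and zero-is-poison (i1 -1) cttz variants can share the same lowering, which is why the paired tests above now check identical code.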