[llvm] [X86] Use GFNI for LZCNT vXi8 ops (PR #141888)

via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jun 1 22:01:30 PDT 2025


https://github.com/houngkoungting updated https://github.com/llvm/llvm-project/pull/141888

From 157a532e88974522c2648e9d329cf08fc7fbddd6 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Sat, 31 May 2025 23:36:02 +0800
Subject: [PATCH 1/3] [X86][GFNI] Add lowering support for CTLZ vXi8 using
 GF2P8AFFINEQB

---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  32 ++
 llvm/test/CodeGen/X86/gfni-lzcnt.ll     | 635 ++++++++++--------------
 2 files changed, 291 insertions(+), 376 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c5d92d5034e8f..5e01f75bfd687 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28998,6 +28998,35 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
 }
+static SDValue LowerVectorCTLZ_GFNI(SDValue Op, SelectionDAG &DAG,
+                                    const X86Subtarget &Subtarget) {
+  SDLoc dl(Op);
+  MVT VT = Op.getSimpleValueType();
+  SDValue Input = Op.getOperand(0);
+
+  if (!VT.isVector() || VT.getVectorElementType() != MVT::i8)
+    return SDValue();
+  SmallVector<SDValue, 16> MatrixVals;
+  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
+    uint8_t mask = 1 << (7 - (i % 8));
+    MatrixVals.push_back(DAG.getConstant(mask, dl, MVT::i8));
+  }
+
+  SDValue Matrix = DAG.getBuildVector(VT, dl, MatrixVals);
+  SDValue Reversed = DAG.getNode(X86ISD::GF2P8AFFINEQB, dl, VT, Input, Matrix,
+                                 DAG.getTargetConstant(0, dl, MVT::i8));
+  SDValue AddMask = DAG.getConstant(0xFF, dl, MVT::i8);
+
+  SDValue AddVec = DAG.getSplatBuildVector(VT, dl, AddMask);
+  SDValue Summed = DAG.getNode(ISD::ADD, dl, VT, Reversed, AddVec);
+  SDValue NotSummed = DAG.getNode(ISD::XOR, dl, VT, Summed, AddVec);
+  SDValue Filtered = DAG.getNode(ISD::AND, dl, VT, NotSummed, Reversed);
+  SDValue FinalMatrix = DAG.getBuildVector(VT, dl, MatrixVals);
+  SDValue LZCNT =
+      DAG.getNode(X86ISD::GF2P8AFFINEQB, dl, VT, Filtered, FinalMatrix,
+                  DAG.getTargetConstant(8, dl, MVT::i8));
+  return LZCNT;
+}
 
 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
@@ -29007,6 +29036,9 @@ static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
   SDLoc dl(Op);
   unsigned Opc = Op.getOpcode();
 
+  if (VT.isVector() && VT.getScalarType() == MVT::i8 && Subtarget.hasGFNI())
+    return LowerVectorCTLZ_GFNI(Op, DAG, Subtarget);
+
   if (VT.isVector())
     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
 
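A note on the primitive the new lowering is built on: GF2P8AFFINEQB treats
each input byte as a GF(2) column vector, multiplies it by the 8x8 bit
matrix packed into the 64-bit operand, and XORs the 8-bit immediate into
the result. A minimal scalar sketch of one byte lane, assuming the row
convention where the high byte of the matrix qword feeds result bit 0:

#include <cstdint>

// One byte lane of GF2P8AFFINEQB:
//   Result.bit[B] = parity(Matrix.byte[7 - B] & X) ^ Imm.bit[B]
static uint8_t gf2p8affine(uint64_t Matrix, uint8_t X, uint8_t Imm) {
  uint8_t R = 0;
  for (int B = 0; B < 8; ++B) {
    uint8_t Row = (Matrix >> (8 * (7 - B))) & 0xFF;
    R |= (uint8_t)(__builtin_parity(Row & X) << B); // GCC/Clang builtin
  }
  return R ^ Imm;
}

With the matrix 0x8040201008040201 this reverses the bits of each byte,
which is also how the backend lowers ISD::BITREVERSE when GFNI is
available; the second revision below leans on that existing lowering
instead of building the matrix by hand.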
diff --git a/llvm/test/CodeGen/X86/gfni-lzcnt.ll b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
index 8e48950c32cd8..f4dd1d1b77ea9 100644
--- a/llvm/test/CodeGen/X86/gfni-lzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
@@ -8,40 +8,44 @@
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pxor %xmm3, %xmm3
-; GFNISSE-NEXT:    pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; GFNISSE-NEXT:    paddb %xmm0, %xmm1
+; GFNISSE-NEXT:    pandn %xmm0, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm2, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX1OR2-LABEL: testv16i8:
-; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; GFNIAVX1OR2-NEXT:    retq
+; GFNIAVX1-LABEL: testv16i8:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    # xmm1 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX1-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    retq
+;
+; GFNIAVX2-LABEL: testv16i8:
+; GFNIAVX2:       # %bb.0:
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX2-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX512-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -50,40 +54,44 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm2
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pxor %xmm3, %xmm3
-; GFNISSE-NEXT:    pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT:    pand %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm1
-; GFNISSE-NEXT:    paddb %xmm3, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; GFNISSE-NEXT:    paddb %xmm0, %xmm1
+; GFNISSE-NEXT:    pandn %xmm0, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm2, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX1OR2-LABEL: testv16i8u:
-; GFNIAVX1OR2:       # %bb.0:
-; GFNIAVX1OR2-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; GFNIAVX1OR2-NEXT:    retq
+; GFNIAVX1-LABEL: testv16i8u:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    # xmm1 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX1-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    retq
+;
+; GFNIAVX2-LABEL: testv16i8u:
+; GFNIAVX2:       # %bb.0:
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX2-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; GFNIAVX512-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
+; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
@@ -92,73 +100,52 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm6
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm3, %xmm0
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm3
+; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
+; GFNISSE-NEXT:    paddb %xmm2, %xmm3
+; GFNISSE-NEXT:    pandn %xmm0, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm3
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT:    pand %xmm3, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm5, %xmm2
+; GFNISSE-NEXT:    paddb %xmm1, %xmm2
+; GFNISSE-NEXT:    pandn %xmm1, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm3
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; GFNIAVX512-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
+; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -167,73 +154,52 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm6
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm6
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm3, %xmm0
-; GFNISSE-NEXT:    paddb %xmm6, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm3
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm3
+; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
+; GFNISSE-NEXT:    paddb %xmm2, %xmm3
+; GFNISSE-NEXT:    pandn %xmm0, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm3
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT:    pand %xmm3, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm2
-; GFNISSE-NEXT:    paddb %xmm5, %xmm2
+; GFNISSE-NEXT:    paddb %xmm1, %xmm2
+; GFNISSE-NEXT:    pandn %xmm1, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm3
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT:    # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; GFNIAVX512-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
+; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
@@ -242,130 +208,88 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm4
-; GFNISSE-NEXT:    movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm7
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    paddb %xmm8, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm1
-; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    paddb %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm2
-; GFNISSE-NEXT:    pand %xmm7, %xmm2
-; GFNISSE-NEXT:    paddb %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT:    pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm3
-; GFNISSE-NEXT:    paddb %xmm5, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
+; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
+; GFNISSE-NEXT:    paddb %xmm4, %xmm5
+; GFNISSE-NEXT:    pandn %xmm0, %xmm5
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm5
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
+; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
+; GFNISSE-NEXT:    paddb %xmm4, %xmm6
+; GFNISSE-NEXT:    pandn %xmm1, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
+; GFNISSE-NEXT:    paddb %xmm4, %xmm7
+; GFNISSE-NEXT:    pandn %xmm2, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
+; GFNISSE-NEXT:    paddb %xmm3, %xmm4
+; GFNISSE-NEXT:    pandn %xmm3, %xmm4
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm4
+; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
+; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
+; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm5
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm3, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm4
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; GFNIAVX1-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm3
+; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8:
 ; GFNIAVX512VL:       # %bb.0:
-; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm3
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; GFNIAVX512VL-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vptestnmb %zmm1, %zmm1, %k1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0 {%k1} {z}
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
+; GFNIAVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm2
+; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -374,133 +298,92 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm4
-; GFNISSE-NEXT:    movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm7
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm0
-; GFNISSE-NEXT:    pxor %xmm5, %xmm5
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm0, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT:    pand %xmm7, %xmm0
-; GFNISSE-NEXT:    paddb %xmm8, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm1, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm1
-; GFNISSE-NEXT:    pand %xmm7, %xmm1
-; GFNISSE-NEXT:    paddb %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm8
-; GFNISSE-NEXT:    pshufb %xmm2, %xmm8
-; GFNISSE-NEXT:    pcmpeqb %xmm5, %xmm2
-; GFNISSE-NEXT:    pand %xmm7, %xmm2
-; GFNISSE-NEXT:    paddb %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm7
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT:    pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT:    pand %xmm7, %xmm5
-; GFNISSE-NEXT:    pshufb %xmm4, %xmm3
-; GFNISSE-NEXT:    paddb %xmm5, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
+; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
+; GFNISSE-NEXT:    paddb %xmm4, %xmm5
+; GFNISSE-NEXT:    pandn %xmm0, %xmm5
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm5
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
+; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
+; GFNISSE-NEXT:    paddb %xmm4, %xmm6
+; GFNISSE-NEXT:    pandn %xmm1, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
+; GFNISSE-NEXT:    paddb %xmm4, %xmm7
+; GFNISSE-NEXT:    pandn %xmm2, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
+; GFNISSE-NEXT:    paddb %xmm3, %xmm4
+; GFNISSE-NEXT:    pandn %xmm3, %xmm4
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm4
+; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
+; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
+; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
+; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT:    vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT:    # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm5
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm3, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm4
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; GFNIAVX1-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm3
+; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8u:
 ; GFNIAVX512VL:       # %bb.0:
-; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT:    # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT:    vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT:    vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm3
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; GFNIAVX512VL-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vptestnmb %zmm1, %zmm1, %k1
-; GFNIAVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT:    vpshufb %zmm0, %zmm2, %zmm0 {%k1} {z}
-; GFNIAVX512BW-NEXT:    vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
+; GFNIAVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm2
+; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFNIAVX: {{.*}}
+; GFNIAVX1OR2: {{.*}}

From e789fab569232abf361bc73d20cea0e0f45b7321 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Sun, 1 Jun 2025 23:54:55 +0800
Subject: [PATCH 2/3] [X86][GFNI] Lower vXi8 ctlz using GF2P8AFFINEQB

---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  45 ++--
 llvm/test/CodeGen/X86/gfni-lzcnt.ll     | 353 ++++++++++++------------
 2 files changed, 198 insertions(+), 200 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5e01f75bfd687..e68c004d524df 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28998,36 +28998,39 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
 }
-static SDValue LowerVectorCTLZ_GFNI(SDValue Op, SelectionDAG &DAG,
+static SDValue LowerVectorCTLZ_GFNI(SDValue Op, const SDLoc &DL,
+                                    SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
-  SDLoc dl(Op);
   MVT VT = Op.getSimpleValueType();
   SDValue Input = Op.getOperand(0);
 
-  if (!VT.isVector() || VT.getVectorElementType() != MVT::i8)
-    return SDValue();
-  SmallVector<SDValue, 16> MatrixVals;
-  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
-    uint8_t mask = 1 << (7 - (i % 8));
-    MatrixVals.push_back(DAG.getConstant(mask, dl, MVT::i8));
-  }
+  assert(VT.isVector() && VT.getVectorElementType() == MVT::i8 &&
+         "Expected vXi8 input for GFNI-based CTLZ lowering");
 
-  SDValue Matrix = DAG.getBuildVector(VT, dl, MatrixVals);
-  SDValue Reversed = DAG.getNode(X86ISD::GF2P8AFFINEQB, dl, VT, Input, Matrix,
-                                 DAG.getTargetConstant(0, dl, MVT::i8));
-  SDValue AddMask = DAG.getConstant(0xFF, dl, MVT::i8);
+  // Step 1: Bit-reverse input
+  SDValue Reversed = DAG.getNode(ISD::BITREVERSE, DL, VT, Input);
+
+  // Step 2: Add 0xFF
+  SDValue AddVec = DAG.getAllOnesConstant(DL, VT);
+  SDValue Summed = DAG.getNode(ISD::ADD, DL, VT, Reversed, AddVec);
+
+  // Step 3: Not(Summed)
+  SDValue NotSummed = DAG.getNOT(DL, Summed, VT);
+
+  // Step 4: AND with Reversed
+  SDValue Filtered = DAG.getNode(ISD::AND, DL, VT, NotSummed, Reversed);
+
+  // Step 5: Apply CTTZ LUT using GF2P8AFFINEQB
+  MVT VT64 = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
+  SDValue CTTZConst = DAG.getConstant(0xAACCF0FF00000000ULL, DL, VT64);
+  SDValue CTTZMatrix = DAG.getBitcast(VT, CTTZConst);
 
-  SDValue AddVec = DAG.getSplatBuildVector(VT, dl, AddMask);
-  SDValue Summed = DAG.getNode(ISD::ADD, dl, VT, Reversed, AddVec);
-  SDValue NotSummed = DAG.getNode(ISD::XOR, dl, VT, Summed, AddVec);
-  SDValue Filtered = DAG.getNode(ISD::AND, dl, VT, NotSummed, Reversed);
-  SDValue FinalMatrix = DAG.getBuildVector(VT, dl, MatrixVals);
   SDValue LZCNT =
-      DAG.getNode(X86ISD::GF2P8AFFINEQB, dl, VT, Filtered, FinalMatrix,
-                  DAG.getTargetConstant(8, dl, MVT::i8));
+      DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, Filtered, CTTZMatrix,
+                  DAG.getTargetConstant(8, DL, MVT::i8));
   return LZCNT;
 }
 
 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
@@ -29037,7 +29041,7 @@ static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
   unsigned Opc = Op.getOpcode();
 
   if (VT.isVector() && VT.getScalarType() == MVT::i8 && Subtarget.hasGFNI())
-    return LowerVectorCTLZ_GFNI(Op, DAG, Subtarget);
+    return LowerVectorCTLZ_GFNI(Op, dl, DAG, Subtarget);
 
   if (VT.isVector())
     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
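
Why this computes a leading-zero count: ctlz(x) == cttz(bitreverse(x));
Reversed + 0xFF is Reversed - 1, so NotSummed & Reversed is
Reversed & -Reversed, the lowest set bit; and the final affine multiply
acts as a lookup table, since for a single-bit input 1<<K the result is
column K of the matrix. The rows 0xAA, 0xCC and 0xF0 of
0xAACCF0FF00000000 spell out the three binary digits of K, and the
always-one 0xFF row yields bit 3, which the immediate of 8 cancels for
single-bit inputs but leaves standing when the byte was zero, giving
ctlz(0) == 8. (That constant appears as the decimal 12307476859704049664
in the updated checks.) A scalar model of the pipeline, as an
illustrative sketch rather than the DAG code above:

#include <cassert>
#include <cstdint>

// Stand-in for the first GF2P8AFFINEQB (the ISD::BITREVERSE step).
static uint8_t bitrev8(uint8_t X) {
  uint8_t R = 0;
  for (int I = 0; I < 8; ++I)
    R |= (uint8_t)(((X >> I) & 1) << (7 - I));
  return R;
}

// Stand-in for the second GF2P8AFFINEQB with the CTTZ matrix and
// immediate 8: maps 1 << K to K, and 0 to 8.
static uint8_t cttzLUT(uint8_t SingleBit) {
  for (int K = 0; K < 8; ++K)
    if (SingleBit == (uint8_t)(1 << K))
      return (uint8_t)K;
  return 8; // no bit set, i.e. the original byte was 0
}

static uint8_t ctlz8(uint8_t X) {
  uint8_t Reversed = bitrev8(X);                    // Step 1
  uint8_t Summed = (uint8_t)(Reversed + 0xFF);      // Step 2: Reversed - 1
  uint8_t Filtered = (uint8_t)(~Summed & Reversed); // Steps 3-4: lowest set bit
  return cttzLUT(Filtered);                         // Step 5
}

int main() {
  for (int X = 0; X < 256; ++X) {
    int Expected = 8;
    for (int B = 7; B >= 0; --B)
      if (X & (1 << B)) { Expected = 7 - B; break; }
    assert(ctlz8((uint8_t)X) == Expected);
  }
  return 0;
}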
diff --git a/llvm/test/CodeGen/X86/gfni-lzcnt.ll b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
index f4dd1d1b77ea9..d6d4b6ebce6f0 100644
--- a/llvm/test/CodeGen/X86/gfni-lzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
@@ -8,44 +8,30 @@
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; GFNISSE-NEXT:    paddb %xmm0, %xmm1
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm2, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX1-LABEL: testv16i8:
-; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    # xmm1 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    retq
-;
-; GFNIAVX2-LABEL: testv16i8:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX2-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
+; GFNIAVX1OR2-LABEL: testv16i8:
+; GFNIAVX1OR2:       # %bb.0:
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; GFNIAVX1OR2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -54,44 +40,30 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; GFNISSE-NEXT:    paddb %xmm0, %xmm1
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm2, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX1-LABEL: testv16i8u:
-; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    # xmm1 = mem[0,0]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX1-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
-; GFNIAVX1-NEXT:    retq
-;
-; GFNIAVX2-LABEL: testv16i8u:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX2-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
+; GFNIAVX1OR2-LABEL: testv16i8u:
+; GFNIAVX1OR2:       # %bb.0:
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; GFNIAVX1OR2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; GFNIAVX512-NEXT:    vpaddb %xmm2, %xmm0, %xmm2
-; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm2, %xmm0
-; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
@@ -100,52 +72,55 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
 ; GFNISSE-NEXT:    paddb %xmm2, %xmm3
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm3
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
 ; GFNISSE-NEXT:    paddb %xmm1, %xmm2
 ; GFNISSE-NEXT:    pandn %xmm1, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
 ; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm3
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandnps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm2, %ymm0
-; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
+; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -154,52 +129,55 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
 ; GFNISSE-NEXT:    paddb %xmm2, %xmm3
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm3
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
 ; GFNISSE-NEXT:    paddb %xmm1, %xmm2
 ; GFNISSE-NEXT:    pandn %xmm1, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
 ; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm3
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandnps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
+; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512:       # %bb.0:
-; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; GFNIAVX512-NEXT:    vpaddb %ymm2, %ymm0, %ymm2
-; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm2, %ymm0
-; GFNIAVX512-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
+; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
@@ -208,27 +186,28 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
 ; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm5
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm5
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm5
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm5
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm6
 ; GFNISSE-NEXT:    pandn %xmm1, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm6
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm7
 ; GFNISSE-NEXT:    pandn %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm7
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
 ; GFNISSE-NEXT:    paddb %xmm3, %xmm4
 ; GFNISSE-NEXT:    pandn %xmm3, %xmm4
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm4
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm4
 ; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
@@ -237,59 +216,67 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ;
 ; GFNIAVX1-LABEL: testv64i8:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm3, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandnps %ymm4, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm4
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; GFNIAVX1-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandnps %ymm3, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
 ; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm3
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm3, %ymm1
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm2
+; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8:
 ; GFNIAVX512VL:       # %bb.0:
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
-; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
 ; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm3
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; GFNIAVX512VL-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandnq %zmm2, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm2
-; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
+; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
+; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -298,27 +285,28 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
 ; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
 ; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm5
 ; GFNISSE-NEXT:    pandn %xmm0, %xmm5
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm5
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm5
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm6
 ; GFNISSE-NEXT:    pandn %xmm1, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm6
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm6
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
 ; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
 ; GFNISSE-NEXT:    paddb %xmm4, %xmm7
 ; GFNISSE-NEXT:    pandn %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm7
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm7
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
 ; GFNISSE-NEXT:    paddb %xmm3, %xmm4
 ; GFNISSE-NEXT:    pandn %xmm3, %xmm4
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm8, %xmm4
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm4
 ; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
 ; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
 ; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
@@ -327,63 +315,70 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
 ; GFNIAVX1:       # %bb.0:
-; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm5
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; GFNIAVX1-NEXT:    vandnps %ymm0, %ymm3, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT:    # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandnps %ymm4, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm4
-; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; GFNIAVX1-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm4, %xmm4
+; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandnps %ymm3, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv64i8u:
 ; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
 ; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm3
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm3, %ymm1
-; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm2
+; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8u:
 ; GFNIAVX512VL:       # %bb.0:
-; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
-; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; GFNIAVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; GFNIAVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
 ; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm3
-; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; GFNIAVX512VL-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
-; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandnq %zmm2, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm2
-; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm2, %zmm0
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
+; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
+; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFNIAVX: {{.*}}
-; GFNIAVX1OR2: {{.*}}
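
The decimal qword 12307476859704049664 broadcast for the second gf2p8affineqb throughout the checks above is the CTTZ affine matrix 0xAACCF0FF00000000 printed in decimal; a compile-time check (illustration only, not part of the patch):

  static_assert(12307476859704049664ULL == 0xAACCF0FFULL << 32,
                "CTTZ LUT matrix used with gf2p8affineqb $8");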

>From 51aa741fe5a24b913688b79186b1ad86be5a2a36 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 2 Jun 2025 09:27:31 +0800
Subject: [PATCH 3/3] [X86][GFNI] Fix style and logic for CTLZ vXi8 lowering

---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  18 +-
 llvm/test/CodeGen/X86/gfni-lzcnt.ll     | 306 +++++++++++-------------
 2 files changed, 152 insertions(+), 172 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e68c004d524df..6fdecd66fc9e1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28988,7 +28988,7 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
     return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
 
   // Decompose 256-bit ops into smaller 128-bit ops.
-  if (VT.is256BitVector() && !Subtarget.hasInt256()) 
+  if (VT.is256BitVector() && !Subtarget.hasInt256())
     return splitVectorIntUnary(Op, DAG, DL);
 
   // Decompose 512-bit ops into smaller 256-bit ops.
@@ -28998,6 +28998,7 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
 }
+
 static SDValue LowerVectorCTLZ_GFNI(SDValue Op, const SDLoc &DL,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
@@ -29007,20 +29008,13 @@ static SDValue LowerVectorCTLZ_GFNI(SDValue Op, const SDLoc &DL,
   assert(VT.isVector() && VT.getVectorElementType() == MVT::i8 &&
          "Expected vXi8 input for GFNI-based CTLZ lowering");
 
-  // Step 1: Bit-reverse input
   SDValue Reversed = DAG.getNode(ISD::BITREVERSE, DL, VT, Input);
 
-  // Step 2: Add 0xFF
-  SDValue AddVec = DAG.getAllOnesConstant(DL, VT);
-  SDValue Summed = DAG.getNode(ISD::ADD, DL, VT, Reversed, AddVec);
-
-  // Step 3: Not(Summed)
-  SDValue NotSummed = DAG.getNOT(DL, Summed, VT);
-
-  // Step 4: AND with Reversed
-  SDValue Filtered = DAG.getNode(ISD::AND, DL, VT, NotSummed, Reversed);
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i8);
+  SDValue ZeroVec = DAG.getSplatBuildVector(VT, DL, Zero);
+  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, ZeroVec, Reversed);
+  SDValue Filtered = DAG.getNode(ISD::AND, DL, VT, Reversed, Neg);
 
-  // Step 5: Apply CTTZ LUT using GF2P8AFFINEQB
   MVT VT64 = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
   SDValue CTTZConst = DAG.getConstant(0xAACCF0FF00000000ULL, DL, VT64);
   SDValue CTTZMatrix = DAG.getBitcast(VT, CTTZConst);
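
The reworked lowering above is compact enough to model in scalar code. The sketch below (an illustration under the documented GF2P8AFFINEQB semantics, not code from the patch; the helper names are the editor's own) shows why it computes CTLZ: ctlz(x) == cttz(bitreverse(x)), v & -v isolates the lowest set bit, and the affine multiply by 0xAACCF0FF00000000 with immediate 8 maps an isolated bit 1 << P to P, and zero to 8:

  #include <cassert>
  #include <cstdint>

  // One byte lane of GF2P8AFFINEQB: result bit I is the GF(2) dot product
  // (parity of the AND) of the source byte with matrix byte (7 - I),
  // XORed with bit I of the immediate.
  static uint8_t gf2p8affineByte(uint64_t Matrix, uint8_t Src, uint8_t Imm) {
    uint8_t R = 0;
    for (int I = 0; I != 8; ++I) {
      uint8_t Row = (Matrix >> (8 * (7 - I))) & 0xFF;
      R |= (__builtin_parity(Row & Src) ^ ((Imm >> I) & 1)) << I;
    }
    return R;
  }

  static uint8_t ctlz8(uint8_t X) {
    // Matrix bytes [1,2,4,8,16,32,64,128] (the constant in the checks)
    // reverse the bits within each byte.
    uint8_t Rev = gf2p8affineByte(0x8040201008040201ULL, X, 0);
    uint8_t NegRev = -Rev;           // the psubb from zero
    uint8_t Isolated = Rev & NegRev; // lowest set bit; 0 stays 0
    // CTTZ LUT (12307476859704049664 in the checks): 1 << P -> P, 0 -> 8.
    return gf2p8affineByte(0xAACCF0FF00000000ULL, Isolated, 8);
  }

  int main() {
    for (unsigned X = 0; X != 256; ++X) {
      unsigned Ref = 8; // ctlz of 0 is 8 for i8 in this model
      for (int B = 7; B >= 0; --B)
        if (X & (1u << B)) { Ref = 7 - (unsigned)B; break; }
      assert(ctlz8((uint8_t)X) == Ref);
    }
    return 0;
  }

This is the same blsi-style identity the patch switches to: the earlier add-0xFF/not/and sequence and v & -v both isolate the lowest set bit, but the negate-and-mask form needs one fewer vector op, which is what the updated pxor/psubb/pand check lines reflect.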
diff --git a/llvm/test/CodeGen/X86/gfni-lzcnt.ll b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
index d6d4b6ebce6f0..6e93f218f1c15 100644
--- a/llvm/test/CodeGen/X86/gfni-lzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
@@ -9,28 +9,27 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE:       # %bb.0:
 ; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; GFNISSE-NEXT:    paddb %xmm0, %xmm1
-; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8:
 ; GFNIAVX1OR2:       # %bb.0:
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512:       # %bb.0:
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
@@ -41,28 +40,27 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE:       # %bb.0:
 ; GFNISSE-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; GFNISSE-NEXT:    paddb %xmm0, %xmm1
-; GFNISSE-NEXT:    pandn %xmm0, %xmm1
-; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm0
+; GFNISSE-NEXT:    pxor %xmm1, %xmm1
+; GFNISSE-NEXT:    psubb %xmm0, %xmm1
+; GFNISSE-NEXT:    pand %xmm1, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8u:
 ; GFNIAVX1OR2:       # %bb.0:
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX1OR2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX1OR2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX1OR2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT:    retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512:       # %bb.0:
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; GFNIAVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
-; GFNIAVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
@@ -72,20 +70,18 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
-; GFNISSE-NEXT:    paddb %xmm2, %xmm3
-; GFNISSE-NEXT:    pandn %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    paddb %xmm1, %xmm2
-; GFNISSE-NEXT:    pandn %xmm1, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    psubb %xmm0, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT:    psubb %xmm1, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
@@ -96,20 +92,20 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vandnps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2:       # %bb.0:
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
@@ -117,9 +113,9 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512:       # %bb.0:
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
@@ -129,20 +125,18 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm3
-; GFNISSE-NEXT:    paddb %xmm2, %xmm3
-; GFNISSE-NEXT:    pandn %xmm0, %xmm3
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm3
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT:    paddb %xmm1, %xmm2
-; GFNISSE-NEXT:    pandn %xmm1, %xmm2
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm3, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm1
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT:    pxor %xmm3, %xmm3
+; GFNISSE-NEXT:    pxor %xmm4, %xmm4
+; GFNISSE-NEXT:    psubb %xmm0, %xmm4
+; GFNISSE-NEXT:    pand %xmm4, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT:    psubb %xmm1, %xmm3
+; GFNISSE-NEXT:    pand %xmm3, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
@@ -153,20 +147,20 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; GFNIAVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vandnps %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2:       # %bb.0:
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
@@ -174,9 +168,9 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512:       # %bb.0:
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; GFNIAVX512-NEXT:    vpaddb %ymm1, %ymm0, %ymm1
-; GFNIAVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; GFNIAVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
@@ -186,32 +180,28 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
-; GFNISSE-NEXT:    paddb %xmm4, %xmm5
-; GFNISSE-NEXT:    pandn %xmm0, %xmm5
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm5
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
-; GFNISSE-NEXT:    paddb %xmm4, %xmm6
-; GFNISSE-NEXT:    pandn %xmm1, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
-; GFNISSE-NEXT:    paddb %xmm4, %xmm7
-; GFNISSE-NEXT:    pandn %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
-; GFNISSE-NEXT:    paddb %xmm3, %xmm4
-; GFNISSE-NEXT:    pandn %xmm3, %xmm4
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
+; GFNISSE-NEXT:    pxor %xmm5, %xmm5
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm0, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm1, %xmm7
+; GFNISSE-NEXT:    pand %xmm7, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm2, %xmm7
+; GFNISSE-NEXT:    pand %xmm7, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT:    psubb %xmm3, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8:
@@ -222,21 +212,21 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vandnps %ymm4, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm4, %ymm0
 ; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm5, %xmm1
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; GFNIAVX1-NEXT:    vandnps %ymm3, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm3, %ymm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
@@ -244,14 +234,14 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNIAVX2:       # %bb.0:
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
@@ -262,20 +252,20 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
-; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; GFNIAVX512VL-NEXT:    vpandnq %zmm2, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandq %zmm0, %zmm2, %zmm0
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW:       # %bb.0:
 ; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
@@ -285,32 +275,28 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE:       # %bb.0:
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm8 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm0
-; GFNISSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm0, %xmm5
-; GFNISSE-NEXT:    paddb %xmm4, %xmm5
-; GFNISSE-NEXT:    pandn %xmm0, %xmm5
-; GFNISSE-NEXT:    movdqa {{.*#+}} xmm0 = [12307476859704049664,12307476859704049664]
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm5
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm1, %xmm6
-; GFNISSE-NEXT:    paddb %xmm4, %xmm6
-; GFNISSE-NEXT:    pandn %xmm1, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm6
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm2, %xmm7
-; GFNISSE-NEXT:    paddb %xmm4, %xmm7
-; GFNISSE-NEXT:    pandn %xmm2, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm7
-; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm8, %xmm3
-; GFNISSE-NEXT:    paddb %xmm3, %xmm4
-; GFNISSE-NEXT:    pandn %xmm3, %xmm4
-; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm0, %xmm4
-; GFNISSE-NEXT:    movdqa %xmm5, %xmm0
-; GFNISSE-NEXT:    movdqa %xmm6, %xmm1
-; GFNISSE-NEXT:    movdqa %xmm7, %xmm2
-; GFNISSE-NEXT:    movdqa %xmm4, %xmm3
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm0
+; GFNISSE-NEXT:    pxor %xmm5, %xmm5
+; GFNISSE-NEXT:    pxor %xmm6, %xmm6
+; GFNISSE-NEXT:    psubb %xmm0, %xmm6
+; GFNISSE-NEXT:    pand %xmm6, %xmm0
+; GFNISSE-NEXT:    movdqa {{.*#+}} xmm6 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm1, %xmm7
+; GFNISSE-NEXT:    pand %xmm7, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT:    pxor %xmm7, %xmm7
+; GFNISSE-NEXT:    psubb %xmm2, %xmm7
+; GFNISSE-NEXT:    pand %xmm7, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT:    psubb %xmm3, %xmm5
+; GFNISSE-NEXT:    pand %xmm5, %xmm3
+; GFNISSE-NEXT:    gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
@@ -321,21 +307,21 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; GFNIAVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT:    vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT:    vandnps %ymm4, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vandps %ymm0, %ymm4, %ymm0
 ; GFNIAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
 ; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm3
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT:    vpaddb %xmm5, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT:    vpsubb %xmm1, %xmm5, %xmm1
 ; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; GFNIAVX1-NEXT:    vandnps %ymm3, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    vandps %ymm1, %ymm3, %ymm1
 ; GFNIAVX1-NEXT:    vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT:    retq
 ;
@@ -343,14 +329,14 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNIAVX2:       # %bb.0:
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
-; GFNIAVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm0, %ymm4
-; GFNIAVX2-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; GFNIAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT:    vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
-; GFNIAVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm2
-; GFNIAVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; GFNIAVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT:    retq
 ;
@@ -361,20 +347,20 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
-; GFNIAVX512VL-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT:    vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; GFNIAVX512VL-NEXT:    vpandnq %zmm2, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT:    vpandq %zmm0, %zmm2, %zmm0
 ; GFNIAVX512VL-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT:    retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW:       # %bb.0:
 ; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
-; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; GFNIAVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT:    retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)


