[llvm] r318993 - [x86][icelake]GFNI

Davide Italiano via llvm-commits <llvm-commits at lists.llvm.org>
Sun Nov 26 12:12:26 PST 2017


On Sun, Nov 26, 2017 at 1:36 AM, Coby Tayree via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
> Author: coby
> Date: Sun Nov 26 01:36:41 2017
> New Revision: 318993
>
> URL: http://llvm.org/viewvc/llvm-project?rev=318993&view=rev
> Log:
> [x86][icelake]GFNI
> galois field arithmetic (GF(2^8)) insns:
> gf2p8affineinvqb
> gf2p8affineqb
> gf2p8mulb
> Differential Revision: https://reviews.llvm.org/D40373
>

This commit message isn't particularly useful.
As it stands, you have to skim through the code to find out what
these instructions are supposed to do.
I think a short explanation, maybe with a reference to the Intel
manual, would go a long way.

e.g.

`[x86][icelake]GFNI`

could be spelled as

`[X86] Add support for yada yada yada`.
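
The body could also say, in a sentence or two, what these instructions
actually compute. As far as I can tell from the Intel docs, gf2p8mulb
multiplies bytes in GF(2^8), and the two affine forms apply an 8x8
bit-matrix transform plus an immediate constant (gf2p8affineinvqb does
it on the byte's multiplicative inverse). For reference, here is a
minimal IR sketch built from the intrinsic declarations in the tests
added here; the @gfni_example wrapper and my reading of the %x/%A
operand roles are just an illustration, not something the patch
spells out:

; gf2p8mulb: byte-wise multiply in GF(2^8)
declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
; affine forms: transform each byte of %x with the 8x8 bit matrix held
; in the corresponding qword of %A, adding the immediate as the
; constant term
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)

define <16 x i8> @gfni_example(<16 x i8> %x, <16 x i8> %A) {
  ; multiply %x by itself in GF(2^8), then run the affine transform
  %m = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %x, <16 x i8> %x)
  %r = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %m, <16 x i8> %A, i8 11)
  ret <16 x i8> %r
}

A sentence or two along those lines in the log would have saved the
code skimming.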

I don't think this should be reverted, but, given that you have a lot
of these commits in the pipeline, please make sure to amend their
commit messages before committing.

Thanks,

--
Davide

> Added:
>     llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll
>     llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll
>     llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll
>     llvm/trunk/test/MC/X86/avx512gfni-encoding.s
>     llvm/trunk/test/MC/X86/avx512vl_gfni-encoding.s
>     llvm/trunk/test/MC/X86/gfni-encoding.s
> Modified:
>     llvm/trunk/include/llvm/IR/IntrinsicsX86.td
>     llvm/trunk/lib/Support/Host.cpp
>     llvm/trunk/lib/Target/X86/X86.td
>     llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>     llvm/trunk/lib/Target/X86/X86ISelLowering.h
>     llvm/trunk/lib/Target/X86/X86InstrAVX512.td
>     llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
>     llvm/trunk/lib/Target/X86/X86InstrInfo.td
>     llvm/trunk/lib/Target/X86/X86InstrSSE.td
>     llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
>     llvm/trunk/lib/Target/X86/X86Subtarget.cpp
>     llvm/trunk/lib/Target/X86/X86Subtarget.h
>
> Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
> +++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Sun Nov 26 01:36:41 2017
> @@ -1341,6 +1341,57 @@ let TargetPrefix = "x86" in {  // All in
>
>  }
>
> +// GFNI Instructions
> +let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
> +  def int_x86_vgf2p8affineinvqb_128 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi">,
> +          Intrinsic<[llvm_v16i8_ty],
> +          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8affineinvqb_256 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi">,
> +          Intrinsic<[llvm_v32i8_ty],
> +          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8affineinvqb_512 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v64qi">,
> +          Intrinsic<[llvm_v64i8_ty],
> +          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +
> +  def int_x86_vgf2p8affineqb_128 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi">,
> +          Intrinsic<[llvm_v16i8_ty],
> +          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8affineqb_256 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi">,
> +          Intrinsic<[llvm_v32i8_ty],
> +          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8affineqb_512 :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v64qi">,
> +          Intrinsic<[llvm_v64i8_ty],
> +          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
> +          [IntrNoMem]>;
> +
> +  def int_x86_vgf2p8mulb_128     :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi">,
> +          Intrinsic<[llvm_v16i8_ty],
> +          [llvm_v16i8_ty, llvm_v16i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8mulb_256     :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v32qi">,
> +          Intrinsic<[llvm_v32i8_ty],
> +          [llvm_v32i8_ty, llvm_v32i8_ty],
> +          [IntrNoMem]>;
> +  def int_x86_vgf2p8mulb_512     :
> +         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v64qi">,
> +          Intrinsic<[llvm_v64i8_ty],
> +          [llvm_v64i8_ty, llvm_v64i8_ty],
> +          [IntrNoMem]>;
> +}
> +
>  // Vector blend
>  let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
>    def int_x86_avx_blendv_pd_256 : GCCBuiltin<"__builtin_ia32_blendvpd256">,
>
> Modified: llvm/trunk/lib/Support/Host.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Support/Host.cpp?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Support/Host.cpp (original)
> +++ llvm/trunk/lib/Support/Host.cpp Sun Nov 26 01:36:41 2017
> @@ -1217,6 +1217,7 @@ bool sys::getHostCPUFeatures(StringMap<b
>    Features["avx512vbmi"]      = HasLeaf7 && ((ECX >>  1) & 1) && HasAVX512Save;
>    Features["pku"]             = HasLeaf7 && ((ECX >>  4) & 1);
>    Features["avx512vbmi2"]     = HasLeaf7 && ((ECX >>  6) & 1) && HasAVX512Save;
> +  Features["gfni"]            = HasLeaf7 && ((ECX >>  8) & 1);
>    Features["vaes"]            = HasLeaf7 && ((ECX >>  9) & 1) && HasAVXSave;
>    Features["vpclmulqdq"]      = HasLeaf7 && ((ECX >> 10) & 1) && HasAVXSave;
>    Features["avx512vnni"]      = HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save;
>
> Modified: llvm/trunk/lib/Target/X86/X86.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86.td (original)
> +++ llvm/trunk/lib/Target/X86/X86.td Sun Nov 26 01:36:41 2017
> @@ -169,6 +169,9 @@ def FeatureBITALG  : SubtargetFeature<"a
>  def FeaturePCLMUL  : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
>                           "Enable packed carry-less multiplication instructions",
>                                 [FeatureSSE2]>;
> +def FeatureGFNI    : SubtargetFeature<"gfni", "HasGFNI", "true",
> +                         "Enable Galois Field Arithmetic Instructions",
> +                               [FeatureSSE2]>;
>  def FeatureVPCLMULQDQ : SubtargetFeature<"vpclmulqdq", "HasVPCLMULQDQ", "true",
>                                           "Enable vpclmulqdq instructions",
>                                           [FeatureAVX, FeaturePCLMUL]>;
> @@ -698,8 +701,8 @@ def ICLFeatures : ProcessorFeatures<CNLF
>    FeatureVBMI2,
>    FeatureVNNI,
>    FeatureVPCLMULQDQ,
> -  FeatureVPOPCNTDQ
> -  // TODO: Add GFNI when it is implemented.
> +  FeatureVPOPCNTDQ,
> +  FeatureGFNI
>  ]>;
>
>  class IcelakeProc<string Name> : ProcModel<Name, SkylakeServerModel,
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Nov 26 01:36:41 2017
> @@ -25254,6 +25254,9 @@ const char *X86TargetLowering::getTarget
>    case X86ISD::VPDPWSSD:           return "X86ISD::VPDPWSSD";
>    case X86ISD::VPDPWSSDS:          return "X86ISD::VPDPWSSDS";
>    case X86ISD::VPSHUFBITQMB:       return "X86ISD::VPSHUFBITQMB";
> +  case X86ISD::GF2P8MULB:          return "X86ISD::GF2P8MULB";
> +  case X86ISD::GF2P8AFFINEQB:      return "X86ISD::GF2P8AFFINEQB";
> +  case X86ISD::GF2P8AFFINEINVQB:   return "X86ISD::GF2P8AFFINEINVQB";
>    }
>    return nullptr;
>  }
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Sun Nov 26 01:36:41 2017
> @@ -587,6 +587,9 @@ namespace llvm {
>        // Conversions between float and half-float.
>        CVTPS2PH, CVTPH2PS, CVTPH2PS_RND,
>
> +      // Galois Field Arithmetic Instructions
> +      GF2P8AFFINEINVQB, GF2P8AFFINEQB, GF2P8MULB,
> +
>        // LWP insert record.
>        LWPINS,
>
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Sun Nov 26 01:36:41 2017
> @@ -10242,3 +10242,55 @@ multiclass VPSHUFBITQMB_common<AVX512VLV
>
>  defm VPSHUFBITQMB : VPSHUFBITQMB_common<avx512vl_i8_info>;
>
> +//===----------------------------------------------------------------------===//
> +// GFNI
> +//===----------------------------------------------------------------------===//
> +
> +multiclass GF2P8MULB_avx512_common<bits<8> Op, string OpStr, SDNode OpNode> {
> +  let Predicates = [HasGFNI, HasAVX512, HasBWI] in
> +  defm Z      : avx512_binop_rm<Op, OpStr, OpNode, v64i8_info,
> +                                SSE_INTALU_ITINS_P, 1>, EVEX_V512;
> +  let Predicates = [HasGFNI, HasVLX, HasBWI] in {
> +    defm Z256 : avx512_binop_rm<Op, OpStr, OpNode, v32i8x_info,
> +                                SSE_INTALU_ITINS_P, 1>, EVEX_V256;
> +    defm Z128 : avx512_binop_rm<Op, OpStr, OpNode, v16i8x_info,
> +                                SSE_INTALU_ITINS_P, 1>, EVEX_V128;
> +  }
> +}
> +
> +defm GF2P8MULB : GF2P8MULB_avx512_common<0xCF, "vgf2p8mulb", X86GF2P8mulb>,
> +                 EVEX_CD8<8, CD8VF>, T8PD;
> +
> +multiclass GF2P8AFFINE_avx512_rmb_imm<bits<8> Op, string OpStr, SDNode OpNode,
> +                                      X86VectorVTInfo VTI,
> +                                      X86VectorVTInfo BcstVTI>
> +           : avx512_3Op_rm_imm8<Op, OpStr, OpNode, VTI, VTI> {
> +  let ExeDomain = VTI.ExeDomain in
> +  defm rmbi : AVX512_maskable<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
> +                (ins VTI.RC:$src1, VTI.ScalarMemOp:$src2, u8imm:$src3),
> +                OpStr, "$src3, ${src2}"##BcstVTI.BroadcastStr##", $src1",
> +                "$src1, ${src2}"##BcstVTI.BroadcastStr##", $src3",
> +                (OpNode (VTI.VT VTI.RC:$src1),
> +                 (bitconvert (BcstVTI.VT (X86VBroadcast (loadi64 addr:$src2)))),
> +                 (i8 imm:$src3))>, EVEX_B;
> +}
> +
> +multiclass GF2P8AFFINE_avx512_common<bits<8> Op, string OpStr, SDNode OpNode> {
> +  let Predicates = [HasGFNI, HasAVX512, HasBWI] in
> +  defm Z      : GF2P8AFFINE_avx512_rmb_imm<Op, OpStr, OpNode, v64i8_info,
> +                                           v8i64_info>, EVEX_V512;
> +  let Predicates = [HasGFNI, HasVLX, HasBWI] in {
> +    defm Z256 : GF2P8AFFINE_avx512_rmb_imm<Op, OpStr, OpNode, v32i8x_info,
> +                                           v4i64x_info>, EVEX_V256;
> +    defm Z128 : GF2P8AFFINE_avx512_rmb_imm<Op, OpStr, OpNode, v16i8x_info,
> +                                           v2i64x_info>, EVEX_V128;
> +  }
> +}
> +
> +defm GF2P8AFFINEINVQB : GF2P8AFFINE_avx512_common<0xCF, "vgf2p8affineinvqb",
> +                                                  X86GF2P8affineinvqb>,
> +                        EVEX_4V, EVEX_CD8<8, CD8VF>, VEX_W, AVX512AIi8Base;
> +defm GF2P8AFFINEQB    : GF2P8AFFINE_avx512_common<0xCE, "vgf2p8affineqb",
> +                                                  X86GF2P8affineqb>,
> +                        EVEX_4V, EVEX_CD8<8, CD8VF>, VEX_W, AVX512AIi8Base;
> +
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td Sun Nov 26 01:36:41 2017
> @@ -672,6 +672,11 @@ def X86vfproundRnd: SDNode<"X86ISD::VFPR
>
>  def X86cvt2mask   : SDNode<"X86ISD::CVT2MASK", SDTIntTruncOp>;
>
> +// galois field arithmetic
> +def X86GF2P8affineinvqb : SDNode<"X86ISD::GF2P8AFFINEINVQB", SDTBlend>;
> +def X86GF2P8affineqb    : SDNode<"X86ISD::GF2P8AFFINEQB", SDTBlend>;
> +def X86GF2P8mulb        : SDNode<"X86ISD::GF2P8MULB", SDTIntBinOp>;
> +
>  //===----------------------------------------------------------------------===//
>  // SSE Complex Patterns
>  //===----------------------------------------------------------------------===//
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Sun Nov 26 01:36:41 2017
> @@ -848,6 +848,7 @@ def HasPCLMUL    : Predicate<"Subtarget-
>  def NoVLX_Or_NoVPCLMULQDQ :
>                      Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
>  def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
> +def HasGFNI      : Predicate<"Subtarget->hasGFNI()">;
>  def HasFMA       : Predicate<"Subtarget->hasFMA()">;
>  def HasFMA4      : Predicate<"Subtarget->hasFMA4()">;
>  def NoFMA4       : Predicate<"!Subtarget->hasFMA4()">;
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sun Nov 26 01:36:41 2017
> @@ -8466,3 +8466,82 @@ def : Pat<(xor FR128:$src1, FR128:$src2)
>            (COPY_TO_REGCLASS
>             (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
>                      (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
> +
> +//===----------------------------------------------------------------------===//
> +// GFNI instructions
> +//===----------------------------------------------------------------------===//
> +
> +multiclass GF2P8MULB_rm<string OpcodeStr, ValueType OpVT,
> +                        RegisterClass RC, PatFrag MemOpFrag,
> +                        X86MemOperand X86MemOp, bit Is2Addr = 0> {
> +  let ExeDomain = SSEPackedInt,
> +      AsmString = !if(Is2Addr,
> +        OpcodeStr##"\t{$src2, $dst|$dst, $src2}",
> +        OpcodeStr##"\t{$src2, $src1, $dst|$dst, $src1, $src2}") in {
> +    let isCommutable = 1 in
> +    def rr : PDI<0xCF, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), "",
> +                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1, RC:$src2)))],
> +                 SSE_INTALU_ITINS_P.rr>,
> +             Sched<[SSE_INTALU_ITINS_P.Sched]>, T8PD;
> +
> +    def rm : PDI<0xCF, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, X86MemOp:$src2), "",
> +                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1,
> +                                 (bitconvert (MemOpFrag addr:$src2)))))],
> +                 SSE_INTALU_ITINS_P.rm>,
> +             Sched<[SSE_INTALU_ITINS_P.Sched.Folded, ReadAfterLd]>, T8PD;
> +  }
> +}
> +
> +multiclass GF2P8AFFINE_rmi<bits<8> Op, string OpStr, ValueType OpVT,
> +                           SDNode OpNode, RegisterClass RC, PatFrag MemOpFrag,
> +                           X86MemOperand X86MemOp, bit Is2Addr = 0> {
> +  let AsmString = !if(Is2Addr,
> +      OpStr##"\t{$src3, $src2, $dst|$dst, $src2, $src3}",
> +      OpStr##"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}") in {
> +  def rri : Ii8<Op, MRMSrcReg, (outs RC:$dst),
> +              (ins RC:$src1, RC:$src2, u8imm:$src3), "",
> +              [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))],
> +              SSE_INTALU_ITINS_P.rr, SSEPackedInt>,
> +              Sched<[WriteVecALU]>;
> +  def rmi : Ii8<Op, MRMSrcMem, (outs RC:$dst),
> +              (ins RC:$src1, X86MemOp:$src2, u8imm:$src3), "",
> +              [(set RC:$dst, (OpVT (OpNode RC:$src1,
> +                                    (bitconvert (MemOpFrag addr:$src2)),
> +                              imm:$src3)))],
> +              SSE_INTALU_ITINS_P.rm, SSEPackedInt>,
> +              Sched<[WriteVecALU.Folded, ReadAfterLd]>;
> +  }
> +}
> +
> +multiclass GF2P8AFFINE_common<bits<8> Op, string OpStr, SDNode OpNode> {
> +  let Constraints = "$src1 = $dst",
> +      Predicates  = [HasGFNI, UseSSE2] in
> +  defm NAME         : GF2P8AFFINE_rmi<Op, OpStr, v16i8, OpNode,
> +                                      VR128, loadv2i64, i128mem, 1>;
> +  let Predicates  = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
> +    defm V##NAME    : GF2P8AFFINE_rmi<Op, "v"##OpStr, v16i8, OpNode, VR128,
> +                                      loadv2i64, i128mem>, VEX_4V, VEX_W;
> +    defm V##NAME##Y : GF2P8AFFINE_rmi<Op, "v"##OpStr, v32i8, OpNode, VR256,
> +                                      loadv4i64, i256mem>, VEX_4V, VEX_L, VEX_W;
> +  }
> +}
> +
> +// GF2P8MULB
> +let Constraints = "$src1 = $dst",
> +    Predicates  = [HasGFNI, UseSSE2] in
> +defm GF2P8MULB      : GF2P8MULB_rm<"gf2p8mulb", v16i8, VR128, memopv2i64,
> +                                    i128mem, 1>;
> +let Predicates  = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
> +  defm VGF2P8MULB   : GF2P8MULB_rm<"vgf2p8mulb", v16i8, VR128, loadv2i64,
> +                                   i128mem>, VEX_4V;
> +  defm VGF2P8MULBY  : GF2P8MULB_rm<"vgf2p8mulb", v32i8, VR256, loadv4i64,
> +                                   i256mem>, VEX_4V, VEX_L;
> +}
> +// GF2P8AFFINEINVQB, GF2P8AFFINEQB
> +let isCommutable = 0 in {
> +  defm GF2P8AFFINEINVQB : GF2P8AFFINE_common<0xCF, "gf2p8affineinvqb",
> +                                             X86GF2P8affineinvqb>, TAPD;
> +  defm GF2P8AFFINEQB    : GF2P8AFFINE_common<0xCE, "gf2p8affineqb",
> +                                             X86GF2P8affineqb>, TAPD;
> +}
> +
>
> Modified: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h (original)
> +++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h Sun Nov 26 01:36:41 2017
> @@ -1170,7 +1170,7 @@ static const IntrinsicData  IntrinsicsWi
>    X86_INTRINSIC_DATA(avx512_mask_vpdpwssds_256, FMA_OP_MASK, X86ISD::VPDPWSSDS, 0),
>    X86_INTRINSIC_DATA(avx512_mask_vpdpwssds_512, FMA_OP_MASK, X86ISD::VPDPWSSDS, 0),
>
> -  X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_128, VPERM_3OP_MASK,
> + X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_128, VPERM_3OP_MASK,
>                      X86ISD::VPERMIV3, 0),
>    X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_256, VPERM_3OP_MASK,
>                      X86ISD::VPERMIV3, 0),
> @@ -1700,6 +1700,26 @@ static const IntrinsicData  IntrinsicsWi
>    X86_INTRINSIC_DATA(vcvtph2ps_256,     INTR_TYPE_1OP, X86ISD::CVTPH2PS, 0),
>    X86_INTRINSIC_DATA(vcvtps2ph_128,     INTR_TYPE_2OP, X86ISD::CVTPS2PH, 0),
>    X86_INTRINSIC_DATA(vcvtps2ph_256,     INTR_TYPE_2OP, X86ISD::CVTPS2PH, 0),
> +
> +  X86_INTRINSIC_DATA(vgf2p8affineinvqb_128, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEINVQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8affineinvqb_256, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEINVQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8affineinvqb_512, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEINVQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8affineqb_128, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8affineqb_256, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8affineqb_512, INTR_TYPE_3OP,
> +                     X86ISD::GF2P8AFFINEQB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8mulb_128, INTR_TYPE_2OP,
> +                     X86ISD::GF2P8MULB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8mulb_256, INTR_TYPE_2OP,
> +                     X86ISD::GF2P8MULB, 0),
> +  X86_INTRINSIC_DATA(vgf2p8mulb_512, INTR_TYPE_2OP,
> +                     X86ISD::GF2P8MULB, 0),
> +
>    X86_INTRINSIC_DATA(xop_vpcomb,        INTR_TYPE_3OP, X86ISD::VPCOM, 0),
>    X86_INTRINSIC_DATA(xop_vpcomd,        INTR_TYPE_3OP, X86ISD::VPCOM, 0),
>    X86_INTRINSIC_DATA(xop_vpcomq,        INTR_TYPE_3OP, X86ISD::VPCOM, 0),
>
> Modified: llvm/trunk/lib/Target/X86/X86Subtarget.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.cpp?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86Subtarget.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86Subtarget.cpp Sun Nov 26 01:36:41 2017
> @@ -299,6 +299,7 @@ void X86Subtarget::initializeEnvironment
>    HasXSAVES = false;
>    HasPCLMUL = false;
>    HasVPCLMULQDQ = false;
> +  HasGFNI = false;
>    HasFMA = false;
>    HasFMA4 = false;
>    HasXOP = false;
>
> Modified: llvm/trunk/lib/Target/X86/X86Subtarget.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.h?rev=318993&r1=318992&r2=318993&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86Subtarget.h (original)
> +++ llvm/trunk/lib/Target/X86/X86Subtarget.h Sun Nov 26 01:36:41 2017
> @@ -128,6 +128,9 @@ protected:
>    bool HasPCLMUL;
>    bool HasVPCLMULQDQ;
>
> +  /// Target has Galois Field Arithmetic instructions
> +  bool HasGFNI;
> +
>    /// Target has 3-operand fused multiply-add
>    bool HasFMA;
>
> @@ -480,6 +483,7 @@ public:
>    bool hasXSAVES() const { return HasXSAVES; }
>    bool hasPCLMUL() const { return HasPCLMUL; }
>    bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
> +  bool hasGFNI() const { return HasGFNI; }
>    // Prefer FMA4 to FMA - its better for commutation/memory folding and
>    // has equal or better performance on all supported targets.
>    bool hasFMA() const { return HasFMA; }
>
> Added: llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,63 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
> +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+gfni,+avx -show-mc-encoding | FileCheck %s
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8affineinvqb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8affineinvqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xcf,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
> +  ret <16 x i8> %1
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
> +define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8affineinvqb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8affineinvqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xcf,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
> +  ret <32 x i8> %1
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_vgf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8affineqb:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8affineqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xce,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
> +  ret <16 x i8> %1
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
> +define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8affineqb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8affineqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xce,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
> +  ret <32 x i8> %1
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
> +define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8mulb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8mulb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xcf,0xc1]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
> +  ret <16 x i8> %1
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
> +define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) {
> +; CHECK-LABEL: test_vgf2p8mulb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    vgf2p8mulb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0xcf,0xc1]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2)
> +  ret <32 x i8> %1
> +}
> +
>
> Added: llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,183 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
> +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl,+gfni,+avx512bw --show-mc-encoding | FileCheck %s
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineinvqb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xcf,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xcf,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0xcf,0xd1,0x03]
> +; CHECK-NEXT:    vpxor %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %xmm0, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i16 %mask to <16 x i1>
> +  %2 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 3)
> +  %3 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> zeroinitializer
> +  %4 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> %passthru
> +  %5 = xor <16 x i8> %3, %4
> +  %6 = xor <16 x i8> %5, %2
> +  ret <16 x i8> %6
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
> +define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineinvqb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xcf,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xcf,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0xcf,0xd1,0x03]
> +; CHECK-NEXT:    vpxor %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i32 %mask to <32 x i1>
> +  %2 = call <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 3)
> +  %3 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> zeroinitializer
> +  %4 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> %passthru
> +  %5 = xor <32 x i8> %3, %4
> +  %6 = xor <32 x i8> %5, %2
> +  ret <32 x i8> %6
> +}
> +
> +declare <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i8)
> +define <64 x i8> @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineinvqb_512:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xcf,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xcf,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x49,0xcf,0xd1,0x03]
> +; CHECK-NEXT:    vpxorq %zmm3, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xef,0xc3]
> +; CHECK-NEXT:    vpxorq %zmm0, %zmm4, %zmm0 ## encoding: [0x62,0xf1,0xdd,0x48,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i64 %mask to <64 x i1>
> +  %2 = call <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8> %src1, <64 x i8> %src2, i8 3)
> +  %3 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> zeroinitializer
> +  %4 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> %passthru
> +  %5 = xor <64 x i8> %3, %4
> +  %6 = xor <64 x i8> %5, %2
> +  ret <64 x i8> %6
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineqb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xce,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xce,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0xce,0xd1,0x03]
> +; CHECK-NEXT:    vpxor %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %xmm0, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i16 %mask to <16 x i1>
> +  %2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 3)
> +  %3 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> zeroinitializer
> +  %4 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> %passthru
> +  %5 = xor <16 x i8> %3, %4
> +  %6 = xor <16 x i8> %5, %2
> +  ret <16 x i8> %6
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
> +define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineqb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xce,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xce,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0xce,0xd1,0x03]
> +; CHECK-NEXT:    vpxor %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i32 %mask to <32 x i1>
> +  %2 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 3)
> +  %3 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> zeroinitializer
> +  %4 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> %passthru
> +  %5 = xor <32 x i8> %3, %4
> +  %6 = xor <32 x i8> %5, %2
> +  ret <32 x i8> %6
> +}
> +
> +declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
> +define <64 x i8> @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
> +; CHECK-LABEL: test_vgf2p8affineqb_512:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xce,0xd9,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xce,0xe1,0x03]
> +; CHECK-NEXT:    vgf2p8affineqb $3, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x49,0xce,0xd1,0x03]
> +; CHECK-NEXT:    vpxorq %zmm3, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xef,0xc3]
> +; CHECK-NEXT:    vpxorq %zmm0, %zmm4, %zmm0 ## encoding: [0x62,0xf1,0xdd,0x48,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i64 %mask to <64 x i1>
> +  %2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src1, <64 x i8> %src2, i8 3)
> +  %3 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> zeroinitializer
> +  %4 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> %passthru
> +  %5 = xor <64 x i8> %3, %4
> +  %6 = xor <64 x i8> %5, %2
> +  ret <64 x i8> %6
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
> +define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
> +; CHECK-LABEL: test_vgf2p8mulb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8mulb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xcf,0xd9]
> +; CHECK-NEXT:    vgf2p8mulb %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xcf,0xe1]
> +; CHECK-NEXT:    vgf2p8mulb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xcf,0xd1]
> +; CHECK-NEXT:    vpxor %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %xmm0, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i16 %mask to <16 x i1>
> +  %2 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
> +  %3 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> zeroinitializer
> +  %4 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> %passthru
> +  %5 = xor <16 x i8> %3, %4
> +  %6 = xor <16 x i8> %5, %2
> +  ret <16 x i8> %6
> +}
> +
> +declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
> +define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
> +; CHECK-LABEL: test_vgf2p8mulb_256:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8mulb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xcf,0xd9]
> +; CHECK-NEXT:    vgf2p8mulb %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xcf,0xe1]
> +; CHECK-NEXT:    vgf2p8mulb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xcf,0xd1]
> +; CHECK-NEXT:    vpxor %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xc3]
> +; CHECK-NEXT:    vpxor %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i32 %mask to <32 x i1>
> +  %2 = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2)
> +  %3 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> zeroinitializer
> +  %4 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> %passthru
> +  %5 = xor <32 x i8> %3, %4
> +  %6 = xor <32 x i8> %5, %2
> +  ret <32 x i8> %6
> +}
> +
> +declare <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8>, <64 x i8>)
> +define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
> +; CHECK-LABEL: test_vgf2p8mulb_512:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
> +; CHECK-NEXT:    vgf2p8mulb %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0xcf,0xd9]
> +; CHECK-NEXT:    vgf2p8mulb %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0xcf,0xe1]
> +; CHECK-NEXT:    vgf2p8mulb %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xcf,0xd1]
> +; CHECK-NEXT:    vpxorq %zmm3, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xef,0xc3]
> +; CHECK-NEXT:    vpxorq %zmm0, %zmm4, %zmm0 ## encoding: [0x62,0xf1,0xdd,0x48,0xef,0xc0]
> +; CHECK-NEXT:    retq ## encoding: [0xc3]
> +  %1 = bitcast i64 %mask to <64 x i1>
> +  %2 = call <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8> %src1, <64 x i8> %src2)
> +  %3 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> zeroinitializer
> +  %4 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> %passthru
> +  %5 = xor <64 x i8> %3, %4
> +  %6 = xor <64 x i8> %5, %2
> +  ret <64 x i8> %6
> +}
> +
>
> Added: llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,33 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
> +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+gfni -show-mc-encoding | FileCheck %s
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_gf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_gf2p8affineinvqb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    gf2p8affineinvqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xcf,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> + %1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
> + ret <16 x i8> %1
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
> +define <16 x i8> @test_gf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_gf2p8affineqb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    gf2p8affineqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xce,0xc1,0x0b]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> + %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
> + ret <16 x i8> %1
> +}
> +
> +declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
> +define <16 x i8> @test_gf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
> +; CHECK-LABEL: test_gf2p8mulb_128:
> +; CHECK:       ## BB#0:
> +; CHECK-NEXT:    gf2p8mulb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xcf,0xc1]
> +; CHECK-NEXT:    retl ## encoding: [0xc3]
> +  %1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
> +  ret <16 x i8> %1
> +}
> +
>
> Added: llvm/trunk/test/MC/X86/avx512gfni-encoding.s
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512gfni-encoding.s?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/MC/X86/avx512gfni-encoding.s (added)
> +++ llvm/trunk/test/MC/X86/avx512gfni-encoding.s Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,178 @@
> +// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+gfni,+avx512f,+avx512bw --show-encoding < %s | FileCheck %s
> +
> +// CHECK: vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb %zmm2, %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0xca]
> +          vgf2p8mulb %zmm2, %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb %zmm2, %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0xca]
> +          vgf2p8mulb %zmm2, %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  (%rcx), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  -256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  256(%rsp), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  256(%rsp), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %zmm20, %zmm1
> +
> +// CHECK: vgf2p8mulb  (%rcx), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  256(%rsp), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  256(%rsp), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to8}, %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x50,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb $7, (%rcx){1to8}, %zmm20, %zmm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx){1to8}, %zmm20, %zmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x50,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx){1to8}, %zmm20, %zmm1
> +
>
> Added: llvm/trunk/test/MC/X86/avx512vl_gfni-encoding.s
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512vl_gfni-encoding.s?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/MC/X86/avx512vl_gfni-encoding.s (added)
> +++ llvm/trunk/test/MC/X86/avx512vl_gfni-encoding.s Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,354 @@
> +// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+gfni,+avx512vl,+avx512bw --show-encoding < %s | FileCheck %s
> +
> +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, 128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb %xmm2, %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0xca]
> +          vgf2p8mulb %xmm2, %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb %xmm2, %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0xca]
> +          vgf2p8mulb %xmm2, %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  (%rcx), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  -64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  64(%rsp), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  64(%rsp), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm20, %xmm1
> +
> +// CHECK: vgf2p8mulb  (%rcx), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  64(%rsp), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  64(%rsp), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2}
> +
> +// CHECK: vgf2p8mulb %ymm2, %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0xca]
> +          vgf2p8mulb %ymm2, %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb %ymm2, %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0xca]
> +          vgf2p8mulb %ymm2, %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  (%rcx), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  -128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  128(%rsp), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  128(%rsp), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm20, %ymm1
> +
> +// CHECK: vgf2p8mulb  (%rcx), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  128(%rsp), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  128(%rsp), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2}
> +
> +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to2}, %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x10,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb $7, (%rcx){1to2}, %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to4}, %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x30,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb $7, (%rcx){1to4}, %ymm20, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx){1to2}, %xmm20, %xmm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x10,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx){1to2}, %xmm20, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx){1to4}, %ymm20, %ymm1
> +// CHECK: encoding: [0x62,0xf3,0xdd,0x30,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx){1to4}, %ymm20, %ymm1
> +
>
> Added: llvm/trunk/test/MC/X86/gfni-encoding.s
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/gfni-encoding.s?rev=318993&view=auto
> ==============================================================================
> --- llvm/trunk/test/MC/X86/gfni-encoding.s (added)
> +++ llvm/trunk/test/MC/X86/gfni-encoding.s Sun Nov 26 01:36:41 2017
> @@ -0,0 +1,254 @@
> +// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+gfni --show-encoding < %s | FileCheck %s
> +
> +// CHECK: gf2p8affineinvqb $7, %xmm2, %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0xca,0x07]
> +          gf2p8affineinvqb $7, %xmm2, %xmm1
> +
> +// CHECK: gf2p8affineqb $7, %xmm2, %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0xca,0x07]
> +          gf2p8affineqb $7, %xmm2, %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, (%rcx), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x09,0x07]
> +          gf2p8affineinvqb  $7, (%rcx), %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, -4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x4c,0x24,0xfc,0x07]
> +          gf2p8affineinvqb  $7, -4(%rsp), %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, 4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x4c,0x24,0x04,0x07]
> +          gf2p8affineinvqb  $7, 4(%rsp), %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          gf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          gf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          gf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, (%rcx), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x09,0x07]
> +          gf2p8affineqb  $7, (%rcx), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, -4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x4c,0x24,0xfc,0x07]
> +          gf2p8affineqb  $7, -4(%rsp), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, 4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x4c,0x24,0x04,0x07]
> +          gf2p8affineqb  $7, 4(%rsp), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          gf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          gf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          gf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8mulb %xmm2, %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0xca]
> +          gf2p8mulb %xmm2, %xmm1
> +
> +// CHECK: gf2p8mulb  (%rcx), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x09]
> +          gf2p8mulb  (%rcx), %xmm1
> +
> +// CHECK: gf2p8mulb  -4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x4c,0x24,0xfc]
> +          gf2p8mulb  -4(%rsp), %xmm1
> +
> +// CHECK: gf2p8mulb  4(%rsp), %xmm1
> +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x4c,0x24,0x04]
> +          gf2p8mulb  4(%rsp), %xmm1
> +
> +// CHECK: gf2p8mulb  268435456(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          gf2p8mulb  268435456(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8mulb  -536870912(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          gf2p8mulb  -536870912(%rcx,%r14,8), %xmm1
> +
> +// CHECK: gf2p8mulb  -536870910(%rcx,%r14,8), %xmm1
> +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          gf2p8mulb  -536870910(%rcx,%r14,8), %xmm1
> +
> +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %xmm2, %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %xmm2, %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0xca,0x07]
> +          vgf2p8affineinvqb $7, %ymm2, %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0xca,0x07]
> +          vgf2p8affineqb $7, %ymm2, %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, (%rcx), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x09,0x07]
> +          vgf2p8affineinvqb  $7, (%rcx), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineinvqb  $7, -4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineinvqb  $7, 4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineinvqb  $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineinvqb  $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, (%rcx), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x09,0x07]
> +          vgf2p8affineqb  $7, (%rcx), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x4c,0x24,0xfc,0x07]
> +          vgf2p8affineqb  $7, -4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, 4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x4c,0x24,0x04,0x07]
> +          vgf2p8affineqb  $7, 4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07]
> +          vgf2p8affineqb  $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07]
> +          vgf2p8affineqb  $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb %xmm2, %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0xca]
> +          vgf2p8mulb %xmm2, %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  (%rcx), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  -4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  4(%rsp), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  4(%rsp), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %xmm10, %xmm1
> +
> +// CHECK: vgf2p8mulb %ymm2, %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0xca]
> +          vgf2p8mulb %ymm2, %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  (%rcx), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x09]
> +          vgf2p8mulb  (%rcx), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  -4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x4c,0x24,0xfc]
> +          vgf2p8mulb  -4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  4(%rsp), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x4c,0x24,0x04]
> +          vgf2p8mulb  4(%rsp), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  268435456(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10]
> +          vgf2p8mulb  268435456(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870912(%rcx,%r14,8), %ymm10, %ymm1
> +
> +// CHECK: vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0]
> +          vgf2p8mulb  -536870910(%rcx,%r14,8), %ymm10, %ymm1
> +
>
>

-- 
Davide

"There are no solved problems; there are only problems that are more
or less solved" -- Henri Poincare

