[llvm] r258680 - [X86][IFMA] adding intrinsics and encoding for multiply and add of unsigned 52-bit integers
Asaf Badouh via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 25 03:14:25 PST 2016
Author: abadouh
Date: Mon Jan 25 05:14:24 2016
New Revision: 258680
URL: http://llvm.org/viewvc/llvm-project?rev=258680&view=rev
Log:
[X86][IFMA] adding intrinsics and encoding for multiply and add of unsigned 52-bit integers
VPMADD52LUQ - Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Qword Accumulators
VPMADD52HUQ - Packed Multiply of Unsigned 52-bit Integers and Add the High 52-bit Products to 64-bit Accumulators
Differential Revision: http://reviews.llvm.org/D16407
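
For reference, the per-lane behavior these intrinsics model can be sketched as plain scalar C++ (a hypothetical reference model, not part of this patch; it assumes a compiler with unsigned __int128 support):

#include <cstdint>

// Hypothetical scalar model of one 64-bit lane. Both instructions form the
// full 104-bit product of the low 52 bits of a and b; VPMADD52LUQ adds the
// low 52 bits of that product to the 64-bit accumulator, VPMADD52HUQ adds
// the high 52 bits.
static inline uint64_t madd52lo(uint64_t acc, uint64_t a, uint64_t b) {
  const uint64_t mask52 = (1ULL << 52) - 1;
  unsigned __int128 prod = (unsigned __int128)(a & mask52) * (b & mask52);
  return acc + (uint64_t)(prod & mask52);   // low 52 bits of the product
}

static inline uint64_t madd52hi(uint64_t acc, uint64_t a, uint64_t b) {
  const uint64_t mask52 = (1ULL << 52) - 1;
  unsigned __int128 prod = (unsigned __int128)(a & mask52) * (b & mask52);
  return acc + (uint64_t)(prod >> 52);      // high 52 bits of the product
}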
Added:
llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
llvm/trunk/test/MC/X86/avx512ifma-encoding.s
llvm/trunk/test/MC/X86/avx512ifmavl-encoding.s
Modified:
llvm/trunk/include/llvm/IR/IntrinsicsX86.td
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.h
llvm/trunk/lib/Target/X86/X86InstrAVX512.td
llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Mon Jan 25 05:14:24 2016
@@ -4040,6 +4040,54 @@ let TargetPrefix = "x86" in { // All in
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52h_uq_128 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52h_uq_128 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq128_maskz">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52l_uq_128 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52l_uq_128 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq128_maskz">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52h_uq_256 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52h_uq_256 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq256_maskz">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52l_uq_256 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52l_uq_256 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq256_maskz">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52h_uq_512 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52h_uq_512 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52huq512_maskz">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vpmadd52l_uq_512 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_maskz_vpmadd52l_uq_512 :
+ GCCBuiltin<"__builtin_ia32_vpmadd52luq512_maskz">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jan 25 05:14:24 2016
@@ -21063,6 +21063,8 @@ const char *X86TargetLowering::getTarget
case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
+ case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
+ case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Mon Jan 25 05:14:24 2016
@@ -441,6 +441,7 @@ namespace llvm {
MULHRS,
// Multiply and Add Packed Integers
VPMADDUBSW, VPMADDWD,
+ VPMADD52L, VPMADD52H,
// FMA nodes
FMADD,
FNMADD,
Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Mon Jan 25 05:14:24 2016
@@ -4698,6 +4698,55 @@ defm VFNMADD : avx512_fma3s<0xAD, 0xBD,
defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;
//===----------------------------------------------------------------------===//
+// AVX-512 IFMA - Packed Multiply of Unsigned 52-bit Integers and Add the Low/High 52-bit Products
+//===----------------------------------------------------------------------===//
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _> {
+ defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
+ AVX512FMA3Base;
+
+ let mayLoad = 1 in {
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
+ }
+}
+} // Constraints = "$src1 = $dst"
+
+multiclass avx512_pmadd52_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [HasIFMA] in {
+ defm Z : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info512>,
+ EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
+ }
+ let Predicates = [HasVLX, HasIFMA] in {
+ defm Z256 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info256>,
+ EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
+ defm Z128 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info128>,
+ EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
+ }
+}
+
+defm VPMADD52LUQ : avx512_pmadd52_common<0xb4, "vpmadd52luq", x86vpmadd52l,
+ avx512vl_i64_info>, VEX_W;
+defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h,
+ avx512vl_i64_info>, VEX_W;
+
+//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from sign integer to float/double
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td Mon Jan 25 05:14:24 2016
@@ -471,6 +471,9 @@ def X86FnmsubRnd : SDNode<"X86ISD::FN
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;
+def x86vpmadd52l : SDNode<"X86ISD::VPMADD52L", SDTFma>;
+def x86vpmadd52h : SDNode<"X86ISD::VPMADD52H", SDTFma>;
+
def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
def X86rcp28 : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;
Modified: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h?rev=258680&r1=258679&r2=258680&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h Mon Jan 25 05:14:24 2016
@@ -1891,6 +1891,18 @@ static const IntrinsicData IntrinsicsWi
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_512, VPERM_3OP_MASK,
X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_128 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_256 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_512 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_128 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_256 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_512 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_128, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_256, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_512, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
@@ -1979,6 +1991,18 @@ static const IntrinsicData IntrinsicsWi
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_512, VPERM_3OP_MASKZ,
X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_pbroadcastb_128, INTR_TYPE_1OP_MASK,
X86ISD::VBROADCAST, 0),
X86_INTRINSIC_DATA(avx512_pbroadcastb_256, INTR_TYPE_1OP_MASK,
Added: llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll?rev=258680&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll Mon Jan 25 05:14:24 2016
@@ -0,0 +1,105 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+ifma | FileCheck %s
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm4
+; CHECK: vpxord %zmm2, %zmm2, %zmm2
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK: vpaddq %zmm2, %zmm4, %zmm1
+; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+
+ %res = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> zeroinitializer, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res4 = add <8 x i64> %res, %res1
+ %res5 = add <8 x i64> %res3, %res2
+ %res6 = add <8 x i64> %res5, %res4
+ ret <8 x i64> %res6
+}
+
+declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm4
+; CHECK: vpxord %zmm2, %zmm2, %zmm2
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK: vpaddq %zmm2, %zmm4, %zmm1
+; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+
+ %res = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> zeroinitializer, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res4 = add <8 x i64> %res, %res1
+ %res5 = add <8 x i64> %res3, %res2
+ %res6 = add <8 x i64> %res5, %res4
+ ret <8 x i64> %res6
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_512:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm4
+; CHECK: vpxord %zmm2, %zmm2, %zmm2
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK: vpaddq %zmm2, %zmm4, %zmm1
+; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+
+ %res = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> zeroinitializer, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res4 = add <8 x i64> %res, %res1
+ %res5 = add <8 x i64> %res3, %res2
+ %res6 = add <8 x i64> %res5, %res4
+ ret <8 x i64> %res6
+}
+
+declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_512:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm4
+; CHECK: vpxord %zmm2, %zmm2, %zmm2
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK: vpaddq %zmm2, %zmm4, %zmm1
+; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+
+ %res = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> zeroinitializer, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res4 = add <8 x i64> %res, %res1
+ %res5 = add <8 x i64> %res3, %res2
+ %res6 = add <8 x i64> %res5, %res4
+ ret <8 x i64> %res6
+}
Added: llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll?rev=258680&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll Mon Jan 25 05:14:24 2016
@@ -0,0 +1,209 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl -mattr=+ifma | FileCheck %s
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_128:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm4
+; CHECK: vxorps %xmm2, %xmm2, %xmm2
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK: vpaddq %xmm0, %xmm3, %xmm0
+; CHECK: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK: vpaddq %xmm0, %xmm1, %xmm0
+
+ %res = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64> zeroinitializer, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res4 = add <2 x i64> %res, %res1
+ %res5 = add <2 x i64> %res3, %res2
+ %res6 = add <2 x i64> %res5, %res4
+ ret <2 x i64> %res6
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_256:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm4
+; CHECK: vxorps %ymm2, %ymm2, %ymm2
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK: vpaddq %ymm0, %ymm3, %ymm0
+; CHECK: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK: vpaddq %ymm0, %ymm1, %ymm0
+
+ %res = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64> zeroinitializer, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res4 = add <4 x i64> %res, %res1
+ %res5 = add <4 x i64> %res3, %res2
+ %res6 = add <4 x i64> %res5, %res4
+ ret <4 x i64> %res6
+}
+
+declare <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_128:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm4
+; CHECK: vxorps %xmm2, %xmm2, %xmm2
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK: vpaddq %xmm0, %xmm3, %xmm0
+; CHECK: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK: vpaddq %xmm0, %xmm1, %xmm0
+
+ %res = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64> zeroinitializer, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res4 = add <2 x i64> %res, %res1
+ %res5 = add <2 x i64> %res3, %res2
+ %res6 = add <2 x i64> %res5, %res4
+ ret <2 x i64> %res6
+}
+
+declare <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_256:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm4
+; CHECK: vxorps %ymm2, %ymm2, %ymm2
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; CHECK: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK: vpaddq %ymm0, %ymm3, %ymm0
+; CHECK: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK: vpaddq %ymm0, %ymm1, %ymm0
+
+ %res = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64> zeroinitializer, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res4 = add <4 x i64> %res, %res1
+ %res5 = add <4 x i64> %res3, %res2
+ %res6 = add <4 x i64> %res5, %res4
+ ret <4 x i64> %res6
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_128:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm4
+; CHECK: vxorps %xmm2, %xmm2, %xmm2
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK: vpaddq %xmm0, %xmm3, %xmm0
+; CHECK: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK: vpaddq %xmm0, %xmm1, %xmm0
+
+ %res = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64> zeroinitializer, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res4 = add <2 x i64> %res, %res1
+ %res5 = add <2 x i64> %res3, %res2
+ %res6 = add <2 x i64> %res5, %res4
+ ret <2 x i64> %res6
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_256:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm3 {%k1}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm4
+; CHECK: vxorps %ymm2, %ymm2, %ymm2
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK: vpaddq %ymm0, %ymm3, %ymm0
+; CHECK: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK: vpaddq %ymm0, %ymm1, %ymm0
+
+ %res = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64> zeroinitializer, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res4 = add <4 x i64> %res, %res1
+ %res5 = add <4 x i64> %res3, %res2
+ %res6 = add <4 x i64> %res5, %res4
+ ret <4 x i64> %res6
+}
+
+declare <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_128:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm4
+; CHECK: vxorps %xmm2, %xmm2, %xmm2
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK: vpaddq %xmm0, %xmm3, %xmm0
+; CHECK: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK: vpaddq %xmm0, %xmm1, %xmm0
+
+ %res = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64> zeroinitializer, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res4 = add <2 x i64> %res, %res1
+ %res5 = add <2 x i64> %res3, %res2
+ %res6 = add <2 x i64> %res5, %res4
+ ret <2 x i64> %res6
+}
+
+declare <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_256:
+; CHECK: kmovw %edi, %k1
+; CHECK: vmovaps %zmm0, %zmm3
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm3 {%k1} {z}
+; CHECK: vmovaps %zmm0, %zmm4
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm4
+; CHECK: vxorps %ymm2, %ymm2, %ymm2
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; CHECK: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK: vpaddq %ymm0, %ymm3, %ymm0
+; CHECK: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK: vpaddq %ymm0, %ymm1, %ymm0
+
+ %res = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64> zeroinitializer, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res3 = call <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res4 = add <4 x i64> %res, %res1
+ %res5 = add <4 x i64> %res3, %res2
+ %res6 = add <4 x i64> %res5, %res4
+ ret <4 x i64> %res6
+}
Added: llvm/trunk/test/MC/X86/avx512ifma-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512ifma-encoding.s?rev=258680&view=auto
==============================================================================
--- llvm/trunk/test/MC/X86/avx512ifma-encoding.s (added)
+++ llvm/trunk/test/MC/X86/avx512ifma-encoding.s Mon Jan 25 05:14:24 2016
@@ -0,0 +1,145 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl -mattr=+ifma --show-encoding %s | FileCheck %s
+
+ vpmadd52luq %zmm4, %zmm5, %zmm6
+//CHECK: vpmadd52luq %zmm4, %zmm5, %zmm6
+//CHECK: encoding: [0x62,0xf2,0xd5,0x48,0xb4,0xf4]
+
+ vpmadd52luq %zmm4, %zmm5, %zmm6 {%k7}
+//CHECK: vpmadd52luq %zmm4, %zmm5, %zmm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x4f,0xb4,0xf4]
+
+ vpmadd52luq %zmm4, %zmm5, %zmm6 {%k7} {z}
+//CHECK: vpmadd52luq %zmm4, %zmm5, %zmm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0xcf,0xb4,0xf4]
+
+ vpmadd52luq %zmm28, %zmm29, %zmm30
+//CHECK: vpmadd52luq %zmm28, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x02,0x95,0x40,0xb4,0xf4]
+
+ vpmadd52luq %zmm28, %zmm29, %zmm30 {%k7}
+//CHECK: vpmadd52luq %zmm28, %zmm29, %zmm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x47,0xb4,0xf4]
+
+ vpmadd52luq %zmm28, %zmm29, %zmm30 {%k7} {z}
+//CHECK: vpmadd52luq %zmm28, %zmm29, %zmm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0xc7,0xb4,0xf4]
+
+ vpmadd52luq (%rcx), %zmm29, %zmm30
+//CHECK: vpmadd52luq (%rcx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb4,0x31]
+
+ vpmadd52luq 0x123(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: vpmadd52luq 291(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x40,0xb4,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52luq (%rcx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52luq (%rcx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb4,0x31]
+
+ vpmadd52luq 0x1fc0(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52luq 8128(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x2000(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52luq 8192(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb4,0xb2,0x00,0x20,0x00,0x00]
+
+ vpmadd52luq -0x2000(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52luq -8192(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x2040(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52luq -8256(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb4,0xb2,0xc0,0xdf,0xff,0xff]
+
+ vpmadd52luq 0x3f8(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52luq 1016(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x400(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52luq 1024(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb4,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52luq -0x400(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52luq -1024(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x408(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52luq -1032(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb4,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52luq 0x1234(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: vpmadd52luq 4660(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x40,0xb4,0xb4,0xf0,0x34,0x12,0x00,0x00]
+
+ vpmadd52huq %zmm4, %zmm5, %zmm6
+//CHECK: vpmadd52huq %zmm4, %zmm5, %zmm6
+//CHECK: encoding: [0x62,0xf2,0xd5,0x48,0xb5,0xf4]
+
+ vpmadd52huq %zmm4, %zmm5, %zmm6 {%k7}
+//CHECK: vpmadd52huq %zmm4, %zmm5, %zmm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x4f,0xb5,0xf4]
+
+ vpmadd52huq %zmm4, %zmm5, %zmm6 {%k7} {z}
+//CHECK: vpmadd52huq %zmm4, %zmm5, %zmm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0xcf,0xb5,0xf4]
+
+ vpmadd52huq %zmm28, %zmm29, %zmm30
+//CHECK: vpmadd52huq %zmm28, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x02,0x95,0x40,0xb5,0xf4]
+
+ vpmadd52huq %zmm28, %zmm29, %zmm30 {%k7}
+//CHECK: vpmadd52huq %zmm28, %zmm29, %zmm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x47,0xb5,0xf4]
+
+ vpmadd52huq %zmm28, %zmm29, %zmm30 {%k7} {z}
+//CHECK: vpmadd52huq %zmm28, %zmm29, %zmm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0xc7,0xb5,0xf4]
+
+ vpmadd52huq (%rcx), %zmm29, %zmm30
+//CHECK: vpmadd52huq (%rcx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb5,0x31]
+
+ vpmadd52huq 0x123(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: vpmadd52huq 291(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x40,0xb5,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52huq (%rcx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52huq (%rcx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb5,0x31]
+
+ vpmadd52huq 0x1fc0(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52huq 8128(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x2000(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52huq 8192(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb5,0xb2,0x00,0x20,0x00,0x00]
+
+ vpmadd52huq -0x2000(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52huq -8192(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x2040(%rdx), %zmm29, %zmm30
+//CHECK: vpmadd52huq -8256(%rdx), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x40,0xb5,0xb2,0xc0,0xdf,0xff,0xff]
+
+ vpmadd52huq 0x3f8(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52huq 1016(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x400(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52huq 1024(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb5,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52huq -0x400(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52huq -1024(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x408(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: vpmadd52huq -1032(%rdx){1to8}, %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x50,0xb5,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52huq 0x1234(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: vpmadd52huq 4660(%rax,%r14,8), %zmm29, %zmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x40,0xb5,0xb4,0xf0,0x34,0x12,0x00,0x00]
Added: llvm/trunk/test/MC/X86/avx512ifmavl-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512ifmavl-encoding.s?rev=258680&view=auto
==============================================================================
--- llvm/trunk/test/MC/X86/avx512ifmavl-encoding.s (added)
+++ llvm/trunk/test/MC/X86/avx512ifmavl-encoding.s Mon Jan 25 05:14:24 2016
@@ -0,0 +1,274 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl -mattr=+ifma -mattr=+avx512vl --show-encoding %s | FileCheck %s
+
+ vpmadd52luq %xmm4, %xmm5, %xmm6 {%k7}
+//CHECK: vpmadd52luq %xmm4, %xmm5, %xmm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x0f,0xb4,0xf4]
+
+ vpmadd52luq %xmm4, %xmm5, %xmm6 {%k7} {z}
+//CHECK: vpmadd52luq %xmm4, %xmm5, %xmm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x8f,0xb4,0xf4]
+
+ vpmadd52luq %ymm4, %ymm5, %ymm6 {%k7}
+//CHECK: vpmadd52luq %ymm4, %ymm5, %ymm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x2f,0xb4,0xf4]
+
+ vpmadd52luq %ymm4, %ymm5, %ymm6 {%k7} {z}
+//CHECK: vpmadd52luq %ymm4, %ymm5, %ymm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0xaf,0xb4,0xf4]
+
+ vpmadd52luq %xmm28, %xmm29, %xmm30
+//CHECK: vpmadd52luq %xmm28, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x02,0x95,0x00,0xb4,0xf4]
+
+ vpmadd52luq %xmm28, %xmm29, %xmm30 {%k7}
+//CHECK: vpmadd52luq %xmm28, %xmm29, %xmm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x07,0xb4,0xf4]
+
+ vpmadd52luq %xmm28, %xmm29, %xmm30 {%k7} {z}
+//CHECK: vpmadd52luq %xmm28, %xmm29, %xmm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0x87,0xb4,0xf4]
+
+ vpmadd52luq (%rcx), %xmm29, %xmm30
+//CHECK: vpmadd52luq (%rcx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb4,0x31]
+
+ vpmadd52luq 0x123(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: vpmadd52luq 291(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x00,0xb4,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52luq (%rcx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52luq (%rcx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb4,0x31]
+
+ vpmadd52luq 0x7f0(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52luq 2032(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x800(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52luq 2048(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb4,0xb2,0x00,0x08,0x00,0x00]
+
+ vpmadd52luq -0x800(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52luq -2048(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x810(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52luq -2064(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb4,0xb2,0xf0,0xf7,0xff,0xff]
+
+ vpmadd52luq 0x3f8(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52luq 1016(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x400(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52luq 1024(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb4,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52luq -0x400(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52luq -1024(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x408(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52luq -1032(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb4,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52luq %ymm28, %ymm29, %ymm30
+//CHECK: vpmadd52luq %ymm28, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x02,0x95,0x20,0xb4,0xf4]
+
+ vpmadd52luq %ymm28, %ymm29, %ymm30 {%k7}
+//CHECK: vpmadd52luq %ymm28, %ymm29, %ymm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x27,0xb4,0xf4]
+
+ vpmadd52luq %ymm28, %ymm29, %ymm30 {%k7} {z}
+//CHECK: vpmadd52luq %ymm28, %ymm29, %ymm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0xa7,0xb4,0xf4]
+
+ vpmadd52luq (%rcx), %ymm29, %ymm30
+//CHECK: vpmadd52luq (%rcx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb4,0x31]
+
+ vpmadd52luq 0x123(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: vpmadd52luq 291(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x22,0x95,0x20,0xb4,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52luq (%rcx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52luq (%rcx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb4,0x31]
+
+ vpmadd52luq 0xfe0(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52luq 4064(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x1000(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52luq 4096(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb4,0xb2,0x00,0x10,0x00,0x00]
+
+ vpmadd52luq -0x1000(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52luq -4096(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x1020(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52luq -4128(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb4,0xb2,0xe0,0xef,0xff,0xff]
+
+ vpmadd52luq 0x3f8(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52luq 1016(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb4,0x72,0x7f]
+
+ vpmadd52luq 0x400(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52luq 1024(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb4,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52luq -0x400(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52luq -1024(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb4,0x72,0x80]
+
+ vpmadd52luq -0x408(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52luq -1032(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb4,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52luq 0x1234(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: vpmadd52luq 4660(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x00,0xb4,0xb4,0xf0,0x34,0x12,0x00,0x00]
+
+ vpmadd52luq 0x1234(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: vpmadd52luq 4660(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x22,0x95,0x20,0xb4,0xb4,0xf0,0x34,0x12,0x00,0x00]
+
+vpmadd52huq %xmm4, %xmm5, %xmm6 {%k7}
+//CHECK: vpmadd52huq %xmm4, %xmm5, %xmm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x0f,0xb5,0xf4]
+
+ vpmadd52huq %xmm4, %xmm5, %xmm6 {%k7} {z}
+//CHECK: vpmadd52huq %xmm4, %xmm5, %xmm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x8f,0xb5,0xf4]
+
+ vpmadd52huq %ymm4, %ymm5, %ymm6 {%k7}
+//CHECK: vpmadd52huq %ymm4, %ymm5, %ymm6 {%k7}
+//CHECK: encoding: [0x62,0xf2,0xd5,0x2f,0xb5,0xf4]
+
+ vpmadd52huq %ymm4, %ymm5, %ymm6 {%k7} {z}
+//CHECK: vpmadd52huq %ymm4, %ymm5, %ymm6 {%k7} {z}
+//CHECK: encoding: [0x62,0xf2,0xd5,0xaf,0xb5,0xf4]
+
+ vpmadd52huq %xmm28, %xmm29, %xmm30
+//CHECK: vpmadd52huq %xmm28, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x02,0x95,0x00,0xb5,0xf4]
+
+ vpmadd52huq %xmm28, %xmm29, %xmm30 {%k7}
+//CHECK: vpmadd52huq %xmm28, %xmm29, %xmm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x07,0xb5,0xf4]
+
+ vpmadd52huq %xmm28, %xmm29, %xmm30 {%k7} {z}
+//CHECK: vpmadd52huq %xmm28, %xmm29, %xmm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0x87,0xb5,0xf4]
+
+ vpmadd52huq (%rcx), %xmm29, %xmm30
+//CHECK: vpmadd52huq (%rcx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb5,0x31]
+
+ vpmadd52huq 0x123(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: vpmadd52huq 291(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x00,0xb5,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52huq (%rcx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52huq (%rcx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb5,0x31]
+
+ vpmadd52huq 0x7f0(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52huq 2032(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x800(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52huq 2048(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb5,0xb2,0x00,0x08,0x00,0x00]
+
+ vpmadd52huq -0x800(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52huq -2048(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x810(%rdx), %xmm29, %xmm30
+//CHECK: vpmadd52huq -2064(%rdx), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x00,0xb5,0xb2,0xf0,0xf7,0xff,0xff]
+
+ vpmadd52huq 0x3f8(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52huq 1016(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x400(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52huq 1024(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb5,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52huq -0x400(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52huq -1024(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x408(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: vpmadd52huq -1032(%rdx){1to2}, %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x62,0x95,0x10,0xb5,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52huq %ymm28, %ymm29, %ymm30
+//CHECK: vpmadd52huq %ymm28, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x02,0x95,0x20,0xb5,0xf4]
+
+ vpmadd52huq %ymm28, %ymm29, %ymm30 {%k7}
+//CHECK: vpmadd52huq %ymm28, %ymm29, %ymm30 {%k7}
+//CHECK: encoding: [0x62,0x02,0x95,0x27,0xb5,0xf4]
+
+ vpmadd52huq %ymm28, %ymm29, %ymm30 {%k7} {z}
+//CHECK: vpmadd52huq %ymm28, %ymm29, %ymm30 {%k7} {z}
+//CHECK: encoding: [0x62,0x02,0x95,0xa7,0xb5,0xf4]
+
+ vpmadd52huq (%rcx), %ymm29, %ymm30
+//CHECK: vpmadd52huq (%rcx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb5,0x31]
+
+ vpmadd52huq 0x123(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: vpmadd52huq 291(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x22,0x95,0x20,0xb5,0xb4,0xf0,0x23,0x01,0x00,0x00]
+
+ vpmadd52huq (%rcx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52huq (%rcx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb5,0x31]
+
+ vpmadd52huq 0xfe0(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52huq 4064(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x1000(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52huq 4096(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb5,0xb2,0x00,0x10,0x00,0x00]
+
+ vpmadd52huq -0x1000(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52huq -4096(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x1020(%rdx), %ymm29, %ymm30
+//CHECK: vpmadd52huq -4128(%rdx), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x20,0xb5,0xb2,0xe0,0xef,0xff,0xff]
+
+ vpmadd52huq 0x3f8(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52huq 1016(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb5,0x72,0x7f]
+
+ vpmadd52huq 0x400(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52huq 1024(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb5,0xb2,0x00,0x04,0x00,0x00]
+
+ vpmadd52huq -0x400(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52huq -1024(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb5,0x72,0x80]
+
+ vpmadd52huq -0x408(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: vpmadd52huq -1032(%rdx){1to4}, %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x62,0x95,0x30,0xb5,0xb2,0xf8,0xfb,0xff,0xff]
+
+ vpmadd52huq 0x1234(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: vpmadd52huq 4660(%rax,%r14,8), %xmm29, %xmm30
+//CHECK: encoding: [0x62,0x22,0x95,0x00,0xb5,0xb4,0xf0,0x34,0x12,0x00,0x00]
+
+ vpmadd52huq 0x1234(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: vpmadd52huq 4660(%rax,%r14,8), %ymm29, %ymm30
+//CHECK: encoding: [0x62,0x22,0x95,0x20,0xb5,0xb4,0xf0,0x34,0x12,0x00,0x00]
+