r321487 - [x86][icelake][vbmi2]
Coby Tayree via cfe-commits
cfe-commits at lists.llvm.org
Wed Dec 27 03:25:08 PST 2017
Author: coby
Date: Wed Dec 27 03:25:07 2017
New Revision: 321487
URL: http://llvm.org/viewvc/llvm-project?rev=321487&view=rev
Log:
[x86][icelake][vbmi2]
Added VBMI2 feature recognition.
Added intrinsics support for VBMI2 instructions:
_mm[128,256,512]_mask[z]_compress_epi[8,16]
_mm[128,256,512]_mask_compressstoreu_epi[8,16]
_mm[128,256,512]_mask[z]_expand_epi[8,16]
_mm[128,256,512]_mask[z]_expandloadu_epi[8,16]
_mm[128,256,512]_mask[z]_sh[l,r]di_epi[16,32,64]
_mm[128,256,512]_mask[z]_sh[l,r]dv_epi[16,32,64]
This matches the corresponding backend work (D40206).
Differential Revision: https://reviews.llvm.org/D41557
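
A minimal usage sketch of a few of the new 512-bit intrinsics (not part of
the commit; the function and variable names below are illustrative, and it
assumes a compiler carrying this patch, invoked with e.g.
clang -O2 -mavx512vbmi2):

#include <immintrin.h>

__m512i vbmi2_demo(__m512i src, __m512i data, __mmask32 k, void *out)
{
  /* Pack the word elements of data selected by k toward element 0;
     unselected destination elements are taken from src. */
  __m512i packed = _mm512_mask_compress_epi16(src, k, data);

  /* Store the selected word elements of data contiguously to unaligned
     memory at out. */
  _mm512_mask_compressstoreu_epi16(out, k, data);

  /* Funnel shift: concatenate each word of data (high) with the matching
     word of packed (low), shift the 32-bit value left by 3, and keep the
     upper 16 bits (VPSHLDW). */
  return _mm512_shldi_epi16(data, packed, 3);
}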
Added:
cfe/trunk/lib/Headers/avx512vbmi2intrin.h (with props)
cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h (with props)
cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c (with props)
cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c (with props)
Modified:
cfe/trunk/include/clang/Basic/BuiltinsX86.def
cfe/trunk/include/clang/Driver/Options.td
cfe/trunk/lib/Basic/Targets/X86.cpp
cfe/trunk/lib/Basic/Targets/X86.h
cfe/trunk/lib/Headers/CMakeLists.txt
cfe/trunk/lib/Headers/immintrin.h
cfe/trunk/test/CodeGen/attr-target-x86.c
cfe/trunk/test/Driver/x86-target-features.c
cfe/trunk/test/Preprocessor/predefined-arch-macros.c
cfe/trunk/test/Preprocessor/x86_target_features.c
Modified: cfe/trunk/include/clang/Basic/BuiltinsX86.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsX86.def?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsX86.def Wed Dec 27 03:25:07 2017
@@ -1168,6 +1168,12 @@ TARGET_BUILTIN(__builtin_ia32_compressdf
TARGET_BUILTIN(__builtin_ia32_compressdf256_mask, "V4dV4dV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressdi128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressdi256_mask, "V4LLiV4LLiV4LLiUc", "", "avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_compresshi128_mask, "V8sV8sV8sUc","","avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compresshi256_mask, "V16sV16sV16sUs","","avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressqi128_mask, "V16cV16cV16cUs","","avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressqi256_mask, "V32cV32cV32cUi","","avx512vl,avx512vbmi2")
+
TARGET_BUILTIN(__builtin_ia32_compresssf128_mask, "V4fV4fV4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compresssf256_mask, "V8fV8fV8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compresssi128_mask, "V4iV4iV4iUc", "", "avx512vl")
@@ -1176,6 +1182,12 @@ TARGET_BUILTIN(__builtin_ia32_compressst
TARGET_BUILTIN(__builtin_ia32_compressstoredf256_mask, "vV4d*V4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoredi128_mask, "vV2LLi*V2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoredi256_mask, "vV4LLi*V4LLiUc", "", "avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_compressstorehi128_mask, "vV8s*V8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressstorehi256_mask, "vV16s*V16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressstoreqi128_mask, "vV16c*V16cUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressstoreqi256_mask, "vV32c*V32cUi", "", "avx512vl,avx512vbmi2")
+
TARGET_BUILTIN(__builtin_ia32_compressstoresf128_mask, "vV4f*V4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoresf256_mask, "vV8f*V8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoresi128_mask, "vV4i*V4iUc", "", "avx512vl")
@@ -1208,10 +1220,22 @@ TARGET_BUILTIN(__builtin_ia32_expanddf12
TARGET_BUILTIN(__builtin_ia32_expanddf256_mask, "V4dV4dV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expanddi128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expanddi256_mask, "V4LLiV4LLiV4LLiUc", "", "avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_expandhi128_mask, "V8sV8sV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandhi256_mask, "V16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandqi128_mask, "V16cV16cV16cUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandqi256_mask, "V32cV32cV32cUi", "", "avx512vl,avx512vbmi2")
+
TARGET_BUILTIN(__builtin_ia32_expandloaddf128_mask, "V2dV2d*V2dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandloaddf256_mask, "V4dV4d*V4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandloaddi128_mask, "V4iV2LLi*V2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandloaddi256_mask, "V4LLiV4LLi*V4LLiUc", "", "avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_expandloadhi128_mask, "V8sV8sC*V8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandloadhi256_mask, "V16sV16sC*V16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandloadqi128_mask, "V16cV16cC*V16cUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandloadqi256_mask, "V32cV32cC*V32cUi", "", "avx512vl,avx512vbmi2")
+
TARGET_BUILTIN(__builtin_ia32_expandloadsf128_mask, "V4fV4f*V4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandloadsf256_mask, "V8fV8f*V8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandloadsi128_mask, "V4iV4i*V4iUc", "", "avx512vl")
@@ -1284,6 +1308,65 @@ TARGET_BUILTIN(__builtin_ia32_vpermt2var
TARGET_BUILTIN(__builtin_ia32_vpermt2varq128_maskz, "V2LLiV2LLiV2LLiV2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermt2varq256_mask, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermt2varq256_maskz, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_vpshldd128_mask, "V4iV4iV4iIiV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldd256_mask, "V8iV8iV8iIiV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldd512_mask, "V16iV16iV16iIiV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldq128_mask, "V2LLiV2LLiV2LLiIiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldq256_mask, "V4LLiV4LLiV4LLiIiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldq512_mask, "V8LLiV8LLiV8LLiIiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldw128_mask, "V8sV8sV8sIiV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldw256_mask, "V16sV16sV16sIiV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldw512_mask, "V32sV32sV32sIiV32sUi", "", "avx512vbmi2")
+
+TARGET_BUILTIN(__builtin_ia32_vpshldvd128_mask, "V4iV4iV4iV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd256_mask, "V8iV8iV8iV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd512_mask, "V16iV16iV16iV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq128_mask, "V2LLiV2LLiV2LLiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq256_mask, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq512_mask, "V8LLiV8LLiV8LLiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw128_mask, "V8sV8sV8sV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw256_mask, "V16sV16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw512_mask, "V32sV32sV32sV32sUi", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd128_maskz, "V4iV4iV4iV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd256_maskz, "V8iV8iV8iV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd512_maskz, "V16iV16iV16iV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq128_maskz, "V2LLiV2LLiV2LLiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq256_maskz, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq512_maskz, "V8LLiV8LLiV8LLiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw128_maskz, "V8sV8sV8sV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw256_maskz, "V16sV16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw512_maskz, "V32sV32sV32sV32sUi", "", "avx512vbmi2")
+
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd128_mask, "V4iV4iV4iV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd256_mask, "V8iV8iV8iV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd512_mask, "V16iV16iV16iV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq128_mask, "V2LLiV2LLiV2LLiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq256_mask, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq512_mask, "V8LLiV8LLiV8LLiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw128_mask, "V8sV8sV8sV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw256_mask, "V16sV16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw512_mask, "V32sV32sV32sV32sUi", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd128_maskz, "V4iV4iV4iV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd256_maskz, "V8iV8iV8iV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd512_maskz, "V16iV16iV16iV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq128_maskz, "V2LLiV2LLiV2LLiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq256_maskz, "V4LLiV4LLiV4LLiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq512_maskz, "V8LLiV8LLiV8LLiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw128_maskz, "V8sV8sV8sV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw256_maskz, "V16sV16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw512_maskz, "V32sV32sV32sV32sUi", "", "avx512vbmi2")
+
+TARGET_BUILTIN(__builtin_ia32_vpshrdd128_mask, "V4iV4iV4iiV4iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdd256_mask, "V8iV8iV8iiV8iUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdd512_mask, "V16iV16iV16iiV16iUs", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdq128_mask, "V2LLiV2LLiV2LLiiV2LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdq256_mask, "V4LLiV4LLiV4LLiiV4LLiUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdq512_mask, "V8LLiV8LLiV8LLiiV8LLiUc", "", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdw128_mask, "V8sV8sV8siV8sUc", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdw256_mask, "V16sV16sV16siV16sUs", "", "avx512vl,avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdw512_mask, "V32sV32sV32siV32sUi", "", "avx512vbmi2")
+
TARGET_BUILTIN(__builtin_ia32_pmovswb512_mask, "V32cV32sV32cUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovuswb512_mask, "V32cV32sV32cUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovwb512_mask, "V32cV32sV32cUi", "", "avx512bw")
@@ -1738,14 +1821,20 @@ TARGET_BUILTIN(__builtin_ia32_dbpsadbw51
TARGET_BUILTIN(__builtin_ia32_psadbw512, "V8LLiV64cV64c","","avx512bw")
TARGET_BUILTIN(__builtin_ia32_compressdf512_mask, "V8dV8dV8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressdi512_mask, "V8LLiV8LLiV8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_compresshi512_mask, "V32sV32sV32sUi","","avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressqi512_mask, "V64cV64cV64cULLi","","avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_compresssf512_mask, "V16fV16fV16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compresssi512_mask, "V16iV16iV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_cmpsd_mask, "UcV2dV2dIiUcIi","","avx512f")
TARGET_BUILTIN(__builtin_ia32_cmpss_mask, "UcV4fV4fIiUcIi","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expanddf512_mask, "V8dV8dV8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expanddi512_mask, "V8LLiV8LLiV8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_expandhi512_mask, "V32sV32sV32sUi","","avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandqi512_mask, "V64cV64cV64cULLi","","avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_expandloaddf512_mask, "V8dV8dC*V8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expandloaddi512_mask, "V8LLiV8LLiC*V8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_expandloadhi512_mask, "V32sV32sC*V32sUi","","avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_expandloadqi512_mask, "V64cV64cC*V64cULLi","","avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_expandloadsf512_mask, "V16fV16fC*V16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expandloadsi512_mask, "V16iV16iC*V16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expandsf512_mask, "V16fV16fV16fUs","","avx512f")
@@ -1753,6 +1842,8 @@ TARGET_BUILTIN(__builtin_ia32_expandsi51
TARGET_BUILTIN(__builtin_ia32_cvtps2pd512_mask, "V8dV8fV8dUcIi","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressstoredf512_mask, "vV8d*V8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressstoredi512_mask, "vV8LLi*V8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_compressstorehi512_mask, "vV32s*V32sUi","","avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_compressstoreqi512_mask, "vV64c*V64cULLi","","avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_compressstoresf512_mask, "vV16f*V16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressstoresi512_mask, "vV16i*V16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvtph2ps_mask, "V4fV8sV4fUc","","avx512vl")
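
For reference, the third argument of each TARGET_BUILTIN entry above is a
prototype string in the Builtins.def encoding: 'v' is void, 'c'/'s'/'i'/'LLi'
are char/short/int/long long, a leading 'U' makes the type unsigned,
'V<n><t>' is a vector of n elements of type t, '*' forms a pointer, 'C' adds
const, and 'Ii' is an int that must be an integer constant expression. As an
illustration, two of the new entries describe functions shaped like the
declarations below (the *_shape names are made up for exposition, not real
symbols):

typedef short v32hi_t __attribute__((__vector_size__(64)));  /* V32s: 32 x short */

/* "V32sV32sV32sUi"  (__builtin_ia32_compresshi512_mask) */
v32hi_t compresshi512_shape(v32hi_t data, v32hi_t merge_src, unsigned int mask);

/* "vV32s*V32sUi"    (__builtin_ia32_compressstorehi512_mask) */
void compressstorehi512_shape(v32hi_t *dst, v32hi_t data, unsigned int mask);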
Modified: cfe/trunk/include/clang/Driver/Options.td
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Driver/Options.td?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/include/clang/Driver/Options.td (original)
+++ cfe/trunk/include/clang/Driver/Options.td Wed Dec 27 03:25:07 2017
@@ -2483,6 +2483,8 @@ def mavx512pf : Flag<["-"], "mavx512pf">
def mno_avx512pf : Flag<["-"], "mno-avx512pf">, Group<m_x86_Features_Group>;
def mavx512vbmi : Flag<["-"], "mavx512vbmi">, Group<m_x86_Features_Group>;
def mno_avx512vbmi : Flag<["-"], "mno-avx512vbmi">, Group<m_x86_Features_Group>;
+def mavx512vbmi2 : Flag<["-"], "mavx512vbmi2">, Group<m_x86_Features_Group>;
+def mno_avx512vbmi2 : Flag<["-"], "mno-avx512vbmi2">, Group<m_x86_Features_Group>;
def mavx512vl : Flag<["-"], "mavx512vl">, Group<m_x86_Features_Group>;
def mno_avx512vl : Flag<["-"], "mno-avx512vl">, Group<m_x86_Features_Group>;
def mavx512vnni : Flag<["-"], "mavx512vnni">, Group<m_x86_Features_Group>;
Modified: cfe/trunk/lib/Basic/Targets/X86.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets/X86.cpp?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets/X86.cpp (original)
+++ cfe/trunk/lib/Basic/Targets/X86.cpp Wed Dec 27 03:25:07 2017
@@ -132,12 +132,13 @@ bool X86TargetInfo::initFeatureMap(
break;
case CK_Icelake:
+ // TODO: Add icelake features here.
setFeatureEnabledImpl(Features, "vaes", true);
setFeatureEnabledImpl(Features, "gfni", true);
setFeatureEnabledImpl(Features, "vpclmulqdq", true);
setFeatureEnabledImpl(Features, "avx512bitalg", true);
setFeatureEnabledImpl(Features, "avx512vnni", true);
- // TODO: Add icelake features here.
+ setFeatureEnabledImpl(Features, "avx512vbmi2", true);
LLVM_FALLTHROUGH;
case CK_Cannonlake:
setFeatureEnabledImpl(Features, "avx512ifma", true);
@@ -476,7 +477,8 @@ void X86TargetInfo::setSSELevel(llvm::St
Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
Features["avx512vl"] = Features["avx512vbmi"] =
Features["avx512ifma"] = Features["avx512vpopcntdq"] =
- Features["avx512bitalg"] = Features["avx512vnni"] = false;
+ Features["avx512bitalg"] = Features["avx512vnni"] =
+ Features["avx512vbmi2"] = false;
break;
}
}
@@ -608,15 +610,16 @@ void X86TargetInfo::setFeatureEnabledImp
Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl" ||
Name == "avx512vbmi" || Name == "avx512ifma" ||
Name == "avx512vpopcntdq" || Name == "avx512bitalg" ||
- Name == "avx512vnni") {
+ Name == "avx512vnni" || Name == "avx512vbmi2") {
if (Enabled)
setSSELevel(Features, AVX512F, Enabled);
- // Enable BWI instruction if VBMI / BITALG is being enabled.
- if ((Name == "avx512vbmi" || Name == "avx512bitalg") && Enabled)
+ // Enable BWI instruction if VBMI/VBMI2/BITALG is being enabled.
+ if ((Name.startswith("avx512vbmi") || Name == "avx512bitalg") && Enabled)
Features["avx512bw"] = true;
- // Also disable VBMI / BITALG if BWI is being disabled.
+ // Also disable VBMI/VBMI2/BITALG if BWI is being disabled.
if (Name == "avx512bw" && !Enabled)
- Features["avx512vbmi"] = Features["avx512bitalg"] = false;
+ Features["avx512vbmi"] = Features["avx512vbmi2"] =
+ Features["avx512bitalg"] = false;
} else if (Name == "fma") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
@@ -716,6 +719,8 @@ bool X86TargetInfo::handleTargetFeatures
HasAVX512VL = true;
} else if (Feature == "+avx512vbmi") {
HasAVX512VBMI = true;
+ } else if (Feature == "+avx512vbmi2") {
+ HasAVX512VBMI2 = true;
} else if (Feature == "+avx512ifma") {
HasAVX512IFMA = true;
} else if (Feature == "+sha") {
@@ -1059,6 +1064,8 @@ void X86TargetInfo::getTargetDefines(con
Builder.defineMacro("__AVX512VL__");
if (HasAVX512VBMI)
Builder.defineMacro("__AVX512VBMI__");
+ if (HasAVX512VBMI2)
+ Builder.defineMacro("__AVX512VBMI2__");
if (HasAVX512IFMA)
Builder.defineMacro("__AVX512IFMA__");
@@ -1196,6 +1203,7 @@ bool X86TargetInfo::isValidFeatureName(S
.Case("avx512bw", true)
.Case("avx512vl", true)
.Case("avx512vbmi", true)
+ .Case("avx512vbmi2", true)
.Case("avx512ifma", true)
.Case("bmi", true)
.Case("bmi2", true)
@@ -1263,6 +1271,7 @@ bool X86TargetInfo::hasFeature(StringRef
.Case("avx512bw", HasAVX512BW)
.Case("avx512vl", HasAVX512VL)
.Case("avx512vbmi", HasAVX512VBMI)
+ .Case("avx512vbmi2", HasAVX512VBMI2)
.Case("avx512ifma", HasAVX512IFMA)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
Modified: cfe/trunk/lib/Basic/Targets/X86.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets/X86.h?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets/X86.h (original)
+++ cfe/trunk/lib/Basic/Targets/X86.h Wed Dec 27 03:25:07 2017
@@ -76,6 +76,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetI
bool HasAVX512BW = false;
bool HasAVX512VL = false;
bool HasAVX512VBMI = false;
+ bool HasAVX512VBMI2 = false;
bool HasAVX512IFMA = false;
bool HasSHA = false;
bool HasMPX = false;
Modified: cfe/trunk/lib/Headers/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/CMakeLists.txt?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/CMakeLists.txt (original)
+++ cfe/trunk/lib/Headers/CMakeLists.txt Wed Dec 27 03:25:07 2017
@@ -19,6 +19,8 @@ set(files
avx512pfintrin.h
avx512vbmiintrin.h
avx512vbmivlintrin.h
+ avx512vbmi2intrin.h
+ avx512vlvbmi2intrin.h
avx512vlbwintrin.h
avx512vlcdintrin.h
avx512vldqintrin.h
Added: cfe/trunk/lib/Headers/avx512vbmi2intrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512vbmi2intrin.h?rev=321487&view=auto
==============================================================================
--- cfe/trunk/lib/Headers/avx512vbmi2intrin.h (added)
+++ cfe/trunk/lib/Headers/avx512vbmi2intrin.h Wed Dec 27 03:25:07 2017
@@ -0,0 +1,391 @@
+/*===------------- avx512vbmi2intrin.h - VBMI2 intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vbmi2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VBMI2INTRIN_H
+#define __AVX512VBMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2")))
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi16(__m512i __S, __mmask32 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D,
+ (__v32hi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D,
+ (__v32hi) _mm512_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D,
+ (__v64qi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D,
+ (__v64qi) _mm512_setzero_qi(),
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D)
+{
+ __builtin_ia32_compressstorehi512_mask ((__v32hi *) __P, (__v32hi) __D,
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D)
+{
+ __builtin_ia32_compressstoreqi512_mask ((__v64qi *) __P, (__v64qi) __D,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi16(__m512i __S, __mmask32 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D,
+ (__v32hi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D,
+ (__v32hi) _mm512_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D,
+ (__v64qi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D)
+{
+ return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D,
+ (__v64qi) _mm512_setzero_qi(),
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P,
+ (__v32hi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P,
+ (__v32hi) _mm512_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P,
+ (__v64qi) __S,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P,
+ (__v64qi) _mm512_setzero_qi(),
+ __U);
+}
+
+#define _mm512_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshldq512_mask((__v8di)(A), \
+ (__v8di)(B), \
+ (int)(I), \
+ (__v8di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_shldi_epi64(U, A, B, I) \
+ _mm512_mask_shldi_epi64(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shldi_epi64(A, B, I) \
+ _mm512_mask_shldi_epi64(_mm512_undefined(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm512_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshldd512_mask((__v16si)(A), \
+ (__v16si)(B), \
+ (int)(I), \
+ (__v16si)(S), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_shldi_epi32(U, A, B, I) \
+ _mm512_mask_shldi_epi32(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shldi_epi32(A, B, I) \
+ _mm512_mask_shldi_epi32(_mm512_undefined(), (__mmask16)(-1), (A), (B), (I))
+
+#define _mm512_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshldw512_mask((__v32hi)(A), \
+ (__v32hi)(B), \
+ (int)(I), \
+ (__v32hi)(S), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_shldi_epi16(U, A, B, I) \
+ _mm512_mask_shldi_epi16(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shldi_epi16(A, B, I) \
+ _mm512_mask_shldi_epi16(_mm512_undefined(), (__mmask32)(-1), (A), (B), (I))
+
+#define _mm512_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshrdq512_mask((__v8di)(A), \
+ (__v8di)(B), \
+ (int)(I), \
+ (__v8di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_shrdi_epi64(U, A, B, I) \
+ _mm512_mask_shrdi_epi64(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shrdi_epi64(A, B, I) \
+ _mm512_mask_shrdi_epi64(_mm512_undefined(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm512_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshrdd512_mask((__v16si)(A), \
+ (__v16si)(B), \
+ (int)(I), \
+ (__v16si)(S), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_shrdi_epi32(U, A, B, I) \
+ _mm512_mask_shrdi_epi32(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shrdi_epi32(A, B, I) \
+ _mm512_mask_shrdi_epi32(_mm512_undefined(), (__mmask16)(-1), (A), (B), (I))
+
+#define _mm512_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_vpshrdw512_mask((__v32hi)(A), \
+ (__v32hi)(B), \
+ (int)(I), \
+ (__v32hi)(S), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_shrdi_epi16(U, A, B, I) \
+ _mm512_mask_shrdi_epi16(_mm512_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm512_shrdi_epi16(A, B, I) \
+ _mm512_mask_shrdi_epi16(_mm512_undefined(), (__mmask32)(-1), (A), (B), (I))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shldv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvq512_mask ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvq512_maskz ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shldv_epi64(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvq512_mask ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shldv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvd512_mask ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvd512_maskz ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shldv_epi32(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvd512_mask ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shldv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvw512_mask ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvw512_maskz ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shldv_epi16(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshldvw512_mask ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shrdv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvq512_mask ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvq512_maskz ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shrdv_epi64(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvq512_mask ((__v8di) __S,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shrdv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvd512_mask ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvd512_maskz ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shrdv_epi32(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvd512_mask ((__v16si) __S,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shrdv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvw512_mask ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvw512_maskz ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shrdv_epi16(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpshrdvw512_mask ((__v32hi) __S,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
+
Propchange: cfe/trunk/lib/Headers/avx512vbmi2intrin.h
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: cfe/trunk/lib/Headers/avx512vbmi2intrin.h
------------------------------------------------------------------------------
svn:keywords = Author Date Id Rev URL
Propchange: cfe/trunk/lib/Headers/avx512vbmi2intrin.h
------------------------------------------------------------------------------
svn:mime-type = text/plain
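
One property of the new funnel shifts worth noting: the shift count is taken
modulo the element width, so passing the same vector as both data operands of
the variable-count form yields a per-element rotate. A small sketch against
the header above, purely to illustrate the VPSHLDV semantics (the function
name is illustrative; compile with -mavx512vbmi2):

#include <immintrin.h>

static __inline__ __m512i
rotl32_per_lane(__m512i x, __m512i counts)
{
  /* VPSHLDVD: concat(x, x) shifted left by counts, upper 32 bits kept,
     i.e. each 32-bit lane of x rotated left by the matching lane of
     counts (counts taken modulo 32). */
  return _mm512_shldv_epi32(x, x, counts);
}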
Added: cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h?rev=321487&view=auto
==============================================================================
--- cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h (added)
+++ cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h Wed Dec 27 03:25:07 2017
@@ -0,0 +1,748 @@
+/*===------------- avx512vlvbmi2intrin.h - VBMI2 intrinsics -----------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlvbmi2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLVBMI2INTRIN_H
+#define __AVX512VLVBMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2")))
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm128_setzero_hi(void) {
+ return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
+ (__v8hi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_compress_epi16(__mmask8 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
+ (__v8hi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
+ (__v16qi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_compress_epi8(__mmask16 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
+ (__v16qi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm128_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
+{
+ __builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D,
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm128_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
+{
+ __builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
+ (__v8hi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_expand_epi16(__mmask8 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
+ (__v8hi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
+ (__v16qi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_expand_epi8(__mmask16 __U, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
+ (__v16qi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
+ (__v8hi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
+ (__v8hi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
+ (__v16qi) __S,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
+ (__v16qi) _mm128_setzero_hi(),
+ __U);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setzero_hi(void) {
+ return (__m256i)(__v16hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
+ (__v16hi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
+ (__v16hi) _mm256_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
+ (__v32qi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
+ (__v32qi) _mm256_setzero_hi(),
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D)
+{
+ __builtin_ia32_compressstorehi256_mask ((__v16hi *) __P, (__v16hi) __D,
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D)
+{
+ __builtin_ia32_compressstoreqi256_mask ((__v32qi *) __P, (__v32qi) __D,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
+ (__v16hi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
+ (__v16hi) _mm256_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
+ (__v32qi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D)
+{
+ return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
+ (__v32qi) _mm256_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P,
+ (__v16hi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P,
+ (__v16hi) _mm256_setzero_hi(),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P,
+ (__v32qi) __S,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P,
+ (__v32qi) _mm256_setzero_hi(),
+ __U);
+}
+
+#define _mm256_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshldq256_mask((__v4di)(A), \
+ (__v4di)(B), \
+ (int)(I), \
+ (__v4di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shldi_epi64(U, A, B, I) \
+ _mm256_mask_shldi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shldi_epi64(A, B, I) \
+ _mm256_mask_shldi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshldq128_mask((__v2di)(A), \
+ (__v2di)(B), \
+ (int)(I), \
+ (__v2di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shldi_epi64(U, A, B, I) \
+ _mm128_mask_shldi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shldi_epi64(A, B, I) \
+ _mm128_mask_shldi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm256_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshldd256_mask((__v8si)(A), \
+ (__v8si)(B), \
+ (int)(I), \
+ (__v8si)(S), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shldi_epi32(U, A, B, I) \
+ _mm256_mask_shldi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shldi_epi32(A, B, I) \
+ _mm256_mask_shldi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshldd128_mask((__v4si)(A), \
+ (__v4si)(B), \
+ (int)(I), \
+ (__v4si)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shldi_epi32(U, A, B, I) \
+ _mm128_mask_shldi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shldi_epi32(A, B, I) \
+ _mm128_mask_shldi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm256_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshldw256_mask((__v16hi)(A), \
+ (__v16hi)(B), \
+ (int)(I), \
+ (__v16hi)(S), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_shldi_epi16(U, A, B, I) \
+ _mm256_mask_shldi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shldi_epi16(A, B, I) \
+ _mm256_mask_shldi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshldw128_mask((__v8hi)(A), \
+ (__v8hi)(B), \
+ (int)(I), \
+ (__v8hi)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shldi_epi16(U, A, B, I) \
+ _mm128_mask_shldi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shldi_epi16(A, B, I) \
+ _mm128_mask_shldi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm256_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshrdq256_mask((__v4di)(A), \
+ (__v4di)(B), \
+ (int)(I), \
+ (__v4di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
+ _mm256_mask_shrdi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shrdi_epi64(A, B, I) \
+ _mm256_mask_shrdi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshrdq128_mask((__v2di)(A), \
+ (__v2di)(B), \
+ (int)(I), \
+ (__v2di)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shrdi_epi64(U, A, B, I) \
+ _mm128_mask_shrdi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shrdi_epi64(A, B, I) \
+ _mm128_mask_shrdi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm256_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshrdd256_mask((__v8si)(A), \
+ (__v8si)(B), \
+ (int)(I), \
+ (__v8si)(S), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
+ _mm256_mask_shrdi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shrdi_epi32(A, B, I) \
+ _mm256_mask_shrdi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshrdd128_mask((__v4si)(A), \
+ (__v4si)(B), \
+ (int)(I), \
+ (__v4si)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shrdi_epi32(U, A, B, I) \
+ _mm128_mask_shrdi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shrdi_epi32(A, B, I) \
+ _mm128_mask_shrdi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm256_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vpshrdw256_mask((__v16hi)(A), \
+ (__v16hi)(B), \
+ (int)(I), \
+ (__v16hi)(S), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
+ _mm256_mask_shrdi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm256_shrdi_epi16(A, B, I) \
+ _mm256_mask_shrdi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+
+#define _mm128_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpshrdw128_mask((__v8hi)(A), \
+ (__v8hi)(B), \
+ (int)(I), \
+ (__v8hi)(S), \
+ (__mmask8)(U)); })
+
+#define _mm128_maskz_shrdi_epi16(U, A, B, I) \
+ _mm128_mask_shrdi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+
+#define _mm128_shrdi_epi16(A, B, I) \
+ _mm128_mask_shrdi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvq256_maskz ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvq128_maskz ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvd256_maskz ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvd128_maskz ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvw256_maskz ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvw128_maskz ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvq256_maskz ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvq128_maskz ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvd256_maskz ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvd128_maskz ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvw256_maskz ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvw128_maskz ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm128_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
Propchange: cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h
------------------------------------------------------------------------------
svn:keywords = Author Date Id Rev URL
Propchange: cfe/trunk/lib/Headers/avx512vlvbmi2intrin.h
------------------------------------------------------------------------------
svn:mime-type = text/plain
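
The VL counterparts above follow the same pattern at 128/256 bits and require
both avx512vl and avx512vbmi2. A short sketch using the new 256-bit byte
expand-load and compress-store intrinsics (illustrative names; compile with
e.g. clang -mavx512vl -mavx512vbmi2):

#include <immintrin.h>

void repack_bytes(const void *in, void *out, __mmask32 k)
{
  /* Load bytes contiguously from in and expand them into the lanes
     selected by k; unselected lanes are zeroed. */
  __m256i expanded = _mm256_maskz_expandloadu_epi8(k, in);

  /* Gather the selected lanes back into a contiguous byte stream at out. */
  _mm256_mask_compressstoreu_epi8(out, k, expanded);
}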
Modified: cfe/trunk/lib/Headers/immintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/immintrin.h?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/immintrin.h (original)
+++ cfe/trunk/lib/Headers/immintrin.h Wed Dec 27 03:25:07 2017
@@ -222,6 +222,15 @@ _mm256_cvtph_ps(__m128i __a)
#include <avx512vbmivlintrin.h>
#endif
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
+#include <avx512vbmi2intrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
+#include <avx512vlvbmi2intrin.h>
+#endif
+
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
#include <avx512pfintrin.h>
#endif
Modified: cfe/trunk/test/CodeGen/attr-target-x86.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/attr-target-x86.c?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/attr-target-x86.c (original)
+++ cfe/trunk/test/CodeGen/attr-target-x86.c Wed Dec 27 03:25:07 2017
@@ -38,9 +38,9 @@ int __attribute__((target("arch=lakemont
// CHECK: lake{{.*}} #7
// CHECK: #0 = {{.*}}"target-cpu"="i686" "target-features"="+x87"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+aes,+avx,+cx16,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
-// CHECK: #2 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-aes,-avx,-avx2,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vl,-avx512vnni,-avx512vpopcntdq,-f16c,-fma,-fma4,-gfni,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-xop,-xsave,-xsaveopt"
+// CHECK: #2 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-aes,-avx,-avx2,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vpopcntdq,-f16c,-fma,-fma4,-gfni,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-xop,-xsave,-xsaveopt"
// CHECK: #3 = {{.*}}"target-cpu"="i686" "target-features"="+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87"
-// CHECK: #4 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-avx,-avx2,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vl,-avx512vnni,-avx512vpopcntdq,-f16c,-fma,-fma4,-sse4.1,-sse4.2,-vaes,-vpclmulqdq,-xop,-xsave,-xsaveopt"
+// CHECK: #4 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-avx,-avx2,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vpopcntdq,-f16c,-fma,-fma4,-sse4.1,-sse4.2,-vaes,-vpclmulqdq,-xop,-xsave,-xsaveopt"
// CHECK: #5 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cx16,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-aes,-vaes"
// CHECK: #6 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-3dnow,-3dnowa,-mmx"
// CHECK: #7 = {{.*}}"target-cpu"="lakemont" "target-features"="+mmx"
Added: cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c?rev=321487&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c (added)
+++ cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c Wed Dec 27 03:25:07 2017
@@ -0,0 +1,304 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+__m512i test_mm512_mask_compress_epi16(__m512i __S, __mmask32 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.512
+ return _mm512_mask_compress_epi16(__S, __U, __D);
+}
+
+__m512i test_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_maskz_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.512
+ return _mm512_maskz_compress_epi16(__U, __D);
+}
+
+__m512i test_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.512
+ return _mm512_mask_compress_epi8(__S, __U, __D);
+}
+
+__m512i test_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_maskz_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.512
+ return _mm512_maskz_compress_epi8(__U, __D);
+}
+
+void test_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.store.w.512
+ _mm512_mask_compressstoreu_epi16(__P, __U, __D);
+}
+
+void test_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.store.b.512
+ _mm512_mask_compressstoreu_epi8(__P, __U, __D);
+}
+
+__m512i test_mm512_mask_expand_epi16(__m512i __S, __mmask32 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.512
+ return _mm512_mask_expand_epi16(__S, __U, __D);
+}
+
+__m512i test_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_maskz_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.512
+ return _mm512_maskz_expand_epi16(__U, __D);
+}
+
+__m512i test_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_mask_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.512
+ return _mm512_mask_expand_epi8(__S, __U, __D);
+}
+
+__m512i test_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) {
+ // CHECK-LABEL: @test_mm512_maskz_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.512
+ return _mm512_maskz_expand_epi8(__U, __D);
+}
+
+__m512i test_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm512_mask_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.512
+ return _mm512_mask_expandloadu_epi16(__S, __U, __P);
+}
+
+__m512i test_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.512
+ return _mm512_maskz_expandloadu_epi16(__U, __P);
+}
+
+__m512i test_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm512_mask_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.512
+ return _mm512_mask_expandloadu_epi8(__S, __U, __P);
+}
+
+__m512i test_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.512
+ return _mm512_maskz_expandloadu_epi8(__U, __P);
+}
+
+__m512i test_mm512_mask_shldi_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.512
+ return _mm512_mask_shldi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shldi_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.512
+ return _mm512_maskz_shldi_epi64(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shldi_epi64(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.512
+ return _mm512_shldi_epi64(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shldi_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.512
+ return _mm512_mask_shldi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shldi_epi32(__mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.512
+ return _mm512_maskz_shldi_epi32(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shldi_epi32(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.512
+ return _mm512_shldi_epi32(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shldi_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.512
+ return _mm512_mask_shldi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shldi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.512
+ return _mm512_maskz_shldi_epi16(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shldi_epi16(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.512
+ return _mm512_shldi_epi16(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shrdi_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.512
+ return _mm512_mask_shrdi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shrdi_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.512
+ return _mm512_maskz_shrdi_epi64(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shrdi_epi64(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.512
+ return _mm512_shrdi_epi64(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shrdi_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.512
+ return _mm512_mask_shrdi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shrdi_epi32(__mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.512
+ return _mm512_maskz_shrdi_epi32(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shrdi_epi32(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.512
+ return _mm512_shrdi_epi32(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shrdi_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.512
+ return _mm512_mask_shrdi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m512i test_mm512_maskz_shrdi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.512
+ return _mm512_maskz_shrdi_epi16(__U, __A, __B, 63);
+}
+
+__m512i test_mm512_shrdi_epi16(__m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.512
+ return _mm512_shrdi_epi16(__A, __B, 31);
+}
+
+__m512i test_mm512_mask_shldv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.512
+ return _mm512_mask_shldv_epi64(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.q.512
+ return _mm512_maskz_shldv_epi64(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shldv_epi64(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.512
+ return _mm512_shldv_epi64(__S, __A, __B);
+}
+
+__m512i test_mm512_mask_shldv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.512
+ return _mm512_mask_shldv_epi32(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.d.512
+ return _mm512_maskz_shldv_epi32(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shldv_epi32(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.512
+ return _mm512_shldv_epi32(__S, __A, __B);
+}
+
+__m512i test_mm512_mask_shldv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.512
+ return _mm512_mask_shldv_epi16(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shldv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.w.512
+ return _mm512_maskz_shldv_epi16(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shldv_epi16(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.512
+ return _mm512_shldv_epi16(__S, __A, __B);
+}
+
+__m512i test_mm512_mask_shrdv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.512
+ return _mm512_mask_shrdv_epi64(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.q.512
+ return _mm512_maskz_shrdv_epi64(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shrdv_epi64(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.512
+ return _mm512_shrdv_epi64(__S, __A, __B);
+}
+
+__m512i test_mm512_mask_shrdv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.512
+ return _mm512_mask_shrdv_epi32(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.d.512
+ return _mm512_maskz_shrdv_epi32(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shrdv_epi32(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.512
+ return _mm512_shrdv_epi32(__S, __A, __B);
+}
+
+__m512i test_mm512_mask_shrdv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_mask_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.512
+ return _mm512_mask_shrdv_epi16(__S, __U, __A, __B);
+}
+
+__m512i test_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_maskz_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.w.512
+ return _mm512_maskz_shrdv_epi16(__U, __S, __A, __B);
+}
+
+__m512i test_mm512_shrdv_epi16(__m512i __S, __m512i __A, __m512i __B) {
+ // CHECK-LABEL: @test_mm512_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.512
+ return _mm512_shrdv_epi16(__S, __A, __B);
+}
+
Propchange: cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
------------------------------------------------------------------------------
svn:keywords = Author Date Id Rev URL
Propchange: cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
------------------------------------------------------------------------------
svn:mime-type = text/plain
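For readers unfamiliar with the compress family exercised above, a scalar sketch of the merge-masking semantics (illustrative only, not part of this patch; the maskz variants zero the tail instead of keeping src):

#include <stdint.h>

/* Reference model of _mm512_mask_compress_epi16(src, mask, data):
   mask-selected words of data are packed contiguously into the low
   lanes of the result, and the remaining lanes are taken from src. */
static void compress_epi16_ref(uint16_t dst[32], const uint16_t src[32],
                               uint32_t mask, const uint16_t data[32]) {
  int j = 0;
  for (int i = 0; i < 32; ++i)
    if (mask & (1u << i))
      dst[j++] = data[i];
  for (; j < 32; ++j)
    dst[j] = src[j];
}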
Added: cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c?rev=321487&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c (added)
+++ cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c Wed Dec 27 03:25:07 2017
@@ -0,0 +1,604 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+__m128i test_mm128_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.128
+ return _mm128_mask_compress_epi16(__S, __U, __D);
+}
+
+__m128i test_mm128_maskz_compress_epi16(__mmask8 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_maskz_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.128
+ return _mm128_maskz_compress_epi16(__U, __D);
+}
+
+__m128i test_mm128_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.128
+ return _mm128_mask_compress_epi8(__S, __U, __D);
+}
+
+__m128i test_mm128_maskz_compress_epi8(__mmask16 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_maskz_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.128
+ return _mm128_maskz_compress_epi8(__U, __D);
+}
+
+void test_mm128_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_compressstoreu_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.store.w.128
+ _mm128_mask_compressstoreu_epi16(__P, __U, __D);
+}
+
+void test_mm128_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_compressstoreu_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.store.b.128
+ _mm128_mask_compressstoreu_epi8(__P, __U, __D);
+}
+
+__m128i test_mm128_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.128
+ return _mm128_mask_expand_epi16(__S, __U, __D);
+}
+
+__m128i test_mm128_maskz_expand_epi16(__mmask8 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_maskz_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.128
+ return _mm128_maskz_expand_epi16(__U, __D);
+}
+
+__m128i test_mm128_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_mask_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.128
+ return _mm128_mask_expand_epi8(__S, __U, __D);
+}
+
+__m128i test_mm128_maskz_expand_epi8(__mmask16 __U, __m128i __D) {
+ // CHECK-LABEL: @test_mm128_maskz_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.128
+ return _mm128_maskz_expand_epi8(__U, __D);
+}
+
+__m128i test_mm128_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm128_mask_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.128
+ return _mm128_mask_expandloadu_epi16(__S, __U, __P);
+}
+
+__m128i test_mm128_maskz_expandloadu_epi16(__mmask8 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm128_maskz_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.128
+ return _mm128_maskz_expandloadu_epi16(__U, __P);
+}
+
+__m128i test_mm128_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm128_mask_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.128
+ return _mm128_mask_expandloadu_epi8(__S, __U, __P);
+}
+
+__m128i test_mm128_maskz_expandloadu_epi8(__mmask16 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm128_maskz_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.128
+ return _mm128_maskz_expandloadu_epi8(__U, __P);
+}
+
+__m256i test_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.256
+ return _mm256_mask_compress_epi16(__S, __U, __D);
+}
+
+__m256i test_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_maskz_compress_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.w.256
+ return _mm256_maskz_compress_epi16(__U, __D);
+}
+
+__m256i test_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.256
+ return _mm256_mask_compress_epi8(__S, __U, __D);
+}
+
+__m256i test_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_maskz_compress_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.b.256
+ return _mm256_maskz_compress_epi8(__U, __D);
+}
+
+void test_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi16
+ // CHECK: @llvm.x86.avx512.mask.compress.store.w.256
+ _mm256_mask_compressstoreu_epi16(__P, __U, __D);
+}
+
+void test_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi8
+ // CHECK: @llvm.x86.avx512.mask.compress.store.b.256
+ _mm256_mask_compressstoreu_epi8(__P, __U, __D);
+}
+
+__m256i test_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.256
+ return _mm256_mask_expand_epi16(__S, __U, __D);
+}
+
+__m256i test_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_maskz_expand_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.w.256
+ return _mm256_maskz_expand_epi16(__U, __D);
+}
+
+__m256i test_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_mask_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.256
+ return _mm256_mask_expand_epi8(__S, __U, __D);
+}
+
+__m256i test_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D) {
+ // CHECK-LABEL: @test_mm256_maskz_expand_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.b.256
+ return _mm256_maskz_expand_epi8(__U, __D);
+}
+
+__m256i test_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm256_mask_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.256
+ return _mm256_mask_expandloadu_epi16(__S, __U, __P);
+}
+
+__m256i test_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi16
+ // CHECK: @llvm.x86.avx512.mask.expand.load.w.256
+ return _mm256_maskz_expandloadu_epi16(__U, __P);
+}
+
+__m256i test_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm256_mask_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.256
+ return _mm256_mask_expandloadu_epi8(__S, __U, __P);
+}
+
+__m256i test_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const* __P) {
+ // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi8
+ // CHECK: @llvm.x86.avx512.mask.expand.load.b.256
+ return _mm256_maskz_expandloadu_epi8(__U, __P);
+}
+
+__m256i test_mm256_mask_shldi_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.256
+ return _mm256_mask_shldi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shldi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.256
+ return _mm256_maskz_shldi_epi64(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shldi_epi64(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.256
+ return _mm256_shldi_epi64(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shldi_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.128
+ return _mm128_mask_shldi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shldi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.128
+ return _mm128_maskz_shldi_epi64(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shldi_epi64(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshld.q.128
+ return _mm128_shldi_epi64(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shldi_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.256
+ return _mm256_mask_shldi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shldi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.256
+ return _mm256_maskz_shldi_epi32(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shldi_epi32(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.256
+ return _mm256_shldi_epi32(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shldi_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.128
+ return _mm128_mask_shldi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shldi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.128
+ return _mm128_maskz_shldi_epi32(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shldi_epi32(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshld.d.128
+ return _mm128_shldi_epi32(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shldi_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.256
+ return _mm256_mask_shldi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shldi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.256
+ return _mm256_maskz_shldi_epi16(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shldi_epi16(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.256
+ return _mm256_shldi_epi16(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shldi_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.128
+ return _mm128_mask_shldi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shldi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.128
+ return _mm128_maskz_shldi_epi16(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shldi_epi16(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshld.w.128
+ return _mm128_shldi_epi16(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shrdi_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.256
+ return _mm256_mask_shrdi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shrdi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.256
+ return _mm256_maskz_shrdi_epi64(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shrdi_epi64(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.256
+ return _mm256_shrdi_epi64(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shrdi_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.128
+ return _mm128_mask_shrdi_epi64(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shrdi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.128
+ return _mm128_maskz_shrdi_epi64(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shrdi_epi64(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdi_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.q.128
+ return _mm128_shrdi_epi64(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shrdi_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.256
+ return _mm256_mask_shrdi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shrdi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.256
+ return _mm256_maskz_shrdi_epi32(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shrdi_epi32(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.256
+ return _mm256_shrdi_epi32(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shrdi_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.128
+ return _mm128_mask_shrdi_epi32(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shrdi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.128
+ return _mm128_maskz_shrdi_epi32(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shrdi_epi32(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdi_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.d.128
+ return _mm128_shrdi_epi32(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shrdi_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.256
+ return _mm256_mask_shrdi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m256i test_mm256_maskz_shrdi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.256
+ return _mm256_maskz_shrdi_epi16(__U, __A, __B, 63);
+}
+
+__m256i test_mm256_shrdi_epi16(__m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.256
+ return _mm256_shrdi_epi16(__A, __B, 31);
+}
+
+__m128i test_mm128_mask_shrdi_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.128
+ return _mm128_mask_shrdi_epi16(__S, __U, __A, __B, 127);
+}
+
+__m128i test_mm128_maskz_shrdi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.128
+ return _mm128_maskz_shrdi_epi16(__U, __A, __B, 63);
+}
+
+__m128i test_mm128_shrdi_epi16(__m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdi_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrd.w.128
+ return _mm128_shrdi_epi16(__A, __B, 31);
+}
+
+__m256i test_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.256
+ return _mm256_mask_shldv_epi64(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.q.256
+ return _mm256_maskz_shldv_epi64(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.256
+ return _mm256_shldv_epi64(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.128
+ return _mm128_mask_shldv_epi64(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.q.128
+ return _mm128_maskz_shldv_epi64(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shldv_epi64(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.q.128
+ return _mm128_shldv_epi64(__S, __A, __B);
+}
+
+__m256i test_mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.256
+ return _mm256_mask_shldv_epi32(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.d.256
+ return _mm256_maskz_shldv_epi32(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.256
+ return _mm256_shldv_epi32(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.128
+ return _mm128_mask_shldv_epi32(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.d.128
+ return _mm128_maskz_shldv_epi32(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shldv_epi32(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.d.128
+ return _mm128_shldv_epi32(__S, __A, __B);
+}
+
+__m256i test_mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.256
+ return _mm256_mask_shldv_epi16(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shldv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.w.256
+ return _mm256_maskz_shldv_epi16(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.256
+ return _mm256_shldv_epi16(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.128
+ return _mm128_mask_shldv_epi16(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shldv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshldv.w.128
+ return _mm128_maskz_shldv_epi16(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shldv_epi16(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shldv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshldv.w.128
+ return _mm128_shldv_epi16(__S, __A, __B);
+}
+
+__m256i test_mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.256
+ return _mm256_mask_shrdv_epi64(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.q.256
+ return _mm256_maskz_shrdv_epi64(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.256
+ return _mm256_shrdv_epi64(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.128
+ return _mm128_mask_shrdv_epi64(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.q.128
+ return _mm128_maskz_shrdv_epi64(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdv_epi64
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.q.128
+ return _mm128_shrdv_epi64(__S, __A, __B);
+}
+
+__m256i test_mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.256
+ return _mm256_mask_shrdv_epi32(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.d.256
+ return _mm256_maskz_shrdv_epi32(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.256
+ return _mm256_shrdv_epi32(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.128
+ return _mm128_mask_shrdv_epi32(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.d.128
+ return _mm128_maskz_shrdv_epi32(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdv_epi32
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.d.128
+ return _mm128_shrdv_epi32(__S, __A, __B);
+}
+
+__m256i test_mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.256
+ return _mm256_mask_shrdv_epi16(__S, __U, __A, __B);
+}
+
+__m256i test_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.w.256
+ return _mm256_maskz_shrdv_epi16(__U, __S, __A, __B);
+}
+
+__m256i test_mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.256
+ return _mm256_shrdv_epi16(__S, __A, __B);
+}
+
+__m128i test_mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_mask_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.128
+ return _mm128_mask_shrdv_epi16(__S, __U, __A, __B);
+}
+
+__m128i test_mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_maskz_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpshrdv.w.128
+ return _mm128_maskz_shrdv_epi16(__U, __S, __A, __B);
+}
+
+__m128i test_mm128_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm128_shrdv_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpshrdv.w.128
+ return _mm128_shrdv_epi16(__S, __A, __B);
+}
+
Propchange: cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c
------------------------------------------------------------------------------
svn:keywords = Author Date Id Rev URL
Propchange: cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c
------------------------------------------------------------------------------
svn:mime-type = text/plain
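The 128- and 256-bit forms follow the same pattern as the 512-bit ones, using the _mm128_*/_mm256_* spellings introduced by this patch. A usage sketch combining two of the new intrinsics (illustrative only; the helper name is made up; build with -mavx512vl -mavx512vbmi2):

#include <immintrin.h>

static __m128i shift_then_pack(__m128i a, __m128i b, __mmask8 k) {
  /* Variable-count double shift right on each 16-bit lane; with both
     data operands equal this behaves like a per-lane rotate, with the
     counts taken from b. */
  __m128i t = _mm128_shrdv_epi16(a, a, b);
  /* Keep only the lanes selected by k, packed to the front; unselected
     lanes are zeroed (maskz form). */
  return _mm128_maskz_compress_epi16(k, t);
}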
Modified: cfe/trunk/test/Driver/x86-target-features.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Driver/x86-target-features.c?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/test/Driver/x86-target-features.c (original)
+++ cfe/trunk/test/Driver/x86-target-features.c Wed Dec 27 03:25:07 2017
@@ -20,10 +20,10 @@
// SSE4-AES: "-target-feature" "+sse4.2" "-target-feature" "+aes"
// NO-SSE4-AES: "-target-feature" "-sse4.1" "-target-feature" "-aes"
-// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mavx -mavx2 -mavx512f -mavx512cd -mavx512er -mavx512pf -mavx512dq -mavx512bw -mavx512vl -mavx512vbmi -mavx512ifma %s -### -o %t.o 2>&1 | FileCheck -check-prefix=AVX %s
-// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mno-avx -mno-avx2 -mno-avx512f -mno-avx512cd -mno-avx512er -mno-avx512pf -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512vbmi -mno-avx512ifma %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-AVX %s
-// AVX: "-target-feature" "+avx" "-target-feature" "+avx2" "-target-feature" "+avx512f" "-target-feature" "+avx512cd" "-target-feature" "+avx512er" "-target-feature" "+avx512pf" "-target-feature" "+avx512dq" "-target-feature" "+avx512bw" "-target-feature" "+avx512vl" "-target-feature" "+avx512vbmi" "-target-feature" "+avx512ifma"
-// NO-AVX: "-target-feature" "-avx" "-target-feature" "-avx2" "-target-feature" "-avx512f" "-target-feature" "-avx512cd" "-target-feature" "-avx512er" "-target-feature" "-avx512pf" "-target-feature" "-avx512dq" "-target-feature" "-avx512bw" "-target-feature" "-avx512vl" "-target-feature" "-avx512vbmi" "-target-feature" "-avx512ifma"
+// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mavx -mavx2 -mavx512f -mavx512cd -mavx512er -mavx512pf -mavx512dq -mavx512bw -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma %s -### -o %t.o 2>&1 | FileCheck -check-prefix=AVX %s
+// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mno-avx -mno-avx2 -mno-avx512f -mno-avx512cd -mno-avx512er -mno-avx512pf -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512vbmi -mno-avx512vbmi2 -mno-avx512ifma %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-AVX %s
+// AVX: "-target-feature" "+avx" "-target-feature" "+avx2" "-target-feature" "+avx512f" "-target-feature" "+avx512cd" "-target-feature" "+avx512er" "-target-feature" "+avx512pf" "-target-feature" "+avx512dq" "-target-feature" "+avx512bw" "-target-feature" "+avx512vl" "-target-feature" "+avx512vbmi" "-target-feature" "+avx512vbmi2" "-target-feature" "+avx512ifma"
+// NO-AVX: "-target-feature" "-avx" "-target-feature" "-avx2" "-target-feature" "-avx512f" "-target-feature" "-avx512cd" "-target-feature" "-avx512er" "-target-feature" "-avx512pf" "-target-feature" "-avx512dq" "-target-feature" "-avx512bw" "-target-feature" "-avx512vl" "-target-feature" "-avx512vbmi" "-target-feature" "-avx512vbmi2" "-target-feature" "-avx512ifma"
// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mpclmul -mrdrnd -mfsgsbase -mbmi -mbmi2 %s -### -o %t.o 2>&1 | FileCheck -check-prefix=BMI %s
// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mno-pclmul -mno-rdrnd -mno-fsgsbase -mno-bmi -mno-bmi2 %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-BMI %s
@@ -120,3 +120,8 @@
// VNNI: "-target-feature" "+avx512vnni"
// NO-VNNI: "-target-feature" "-avx512vnni"
+// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mavx512vbmi2 %s -### -o %t.o 2>&1 | FileCheck -check-prefix=VBMI2 %s
+// RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mno-avx512vbmi2 %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-VBMI2 %s
+// VBMI2: "-target-feature" "+avx512vbmi2"
+// NO-VBMI2: "-target-feature" "-avx512vbmi2"
+
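In user terms, the driver side of this patch boils down to the new -mavx512vbmi2 / -mno-avx512vbmi2 flags checked above; a typical invocation that exercises the VL forms would look something like "clang -O2 -mavx512vl -mavx512vbmi2 -c kernels.c" (the file name is just an example).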
Modified: cfe/trunk/test/Preprocessor/predefined-arch-macros.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Preprocessor/predefined-arch-macros.c?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/test/Preprocessor/predefined-arch-macros.c (original)
+++ cfe/trunk/test/Preprocessor/predefined-arch-macros.c Wed Dec 27 03:25:07 2017
@@ -1056,6 +1056,7 @@
// CHECK_ICL_M32: #define __AVX512DQ__ 1
// CHECK_ICL_M32: #define __AVX512F__ 1
// CHECK_ICL_M32: #define __AVX512IFMA__ 1
+// CHECK_ICL_M32: #define __AVX512VBMI2__ 1
// CHECK_ICL_M32: #define __AVX512VBMI__ 1
// CHECK_ICL_M32: #define __AVX512VL__ 1
// CHECK_ICL_M32: #define __AVX512VNNI__ 1
@@ -1106,6 +1107,7 @@
// CHECK_ICL_M64: #define __AVX512DQ__ 1
// CHECK_ICL_M64: #define __AVX512F__ 1
// CHECK_ICL_M64: #define __AVX512IFMA__ 1
+// CHECK_ICL_M64: #define __AVX512VBMI2__ 1
// CHECK_ICL_M64: #define __AVX512VBMI__ 1
// CHECK_ICL_M64: #define __AVX512VL__ 1
// CHECK_ICL_M64: #define __AVX512VNNI__ 1
Modified: cfe/trunk/test/Preprocessor/x86_target_features.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Preprocessor/x86_target_features.c?rev=321487&r1=321486&r2=321487&view=diff
==============================================================================
--- cfe/trunk/test/Preprocessor/x86_target_features.c (original)
+++ cfe/trunk/test/Preprocessor/x86_target_features.c Wed Dec 27 03:25:07 2017
@@ -231,6 +231,27 @@
// AVX512VBMINOAVX512BW-NOT: #define __AVX512BW__ 1
// AVX512VBMINOAVX512BW-NOT: #define __AVX512VBMI__ 1
+// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512vbmi2 -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512VBMI2 %s
+
+// AVX512VBMI2: #define __AVX2__ 1
+// AVX512VBMI2: #define __AVX512BW__ 1
+// AVX512VBMI2: #define __AVX512F__ 1
+// AVX512VBMI2: #define __AVX512VBMI2__ 1
+// AVX512VBMI2: #define __AVX__ 1
+// AVX512VBMI2: #define __SSE2_MATH__ 1
+// AVX512VBMI2: #define __SSE2__ 1
+// AVX512VBMI2: #define __SSE3__ 1
+// AVX512VBMI2: #define __SSE4_1__ 1
+// AVX512VBMI2: #define __SSE4_2__ 1
+// AVX512VBMI2: #define __SSE_MATH__ 1
+// AVX512VBMI2: #define __SSE__ 1
+// AVX512VBMI2: #define __SSSE3__ 1
+
+// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512vbmi2 -mno-avx512bw -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512VBMI2NOAVX512BW %s
+
+// AVX512VBMI2NOAVX512BW-NOT: #define __AVX512BW__ 1
+// AVX512VBMI2NOAVX512BW-NOT: #define __AVX512VBMI2__ 1
+
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512bitalg -mno-avx512bw -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512BITALGNOAVX512BW %s
// AVX512BITALGNOAVX512BW-NOT: #define __AVX512BITALG__ 1