[llvm] r333386 - [X86] Add unmasked vpermi2var intrinsics so we can use explicit select instructions for masking in clang.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Mon May 28 20:26:30 PDT 2018


Author: ctopper
Date: Mon May 28 20:26:30 2018
New Revision: 333386

URL: http://llvm.org/viewvc/llvm-project?rev=333386&view=rev
Log:
[X86] Add unmasked vpermi2var intrinsics so we can use explicit select instructions for masking in clang.

This will allow us to remove the three different masked flavors of these intrinsics (mask_vpermi2var, mask_vpermt2var, and maskz_vpermt2var). I'm leaving the actual intrinsic removal for a follow-up patch.
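
For reference, here is a minimal sketch (illustrative, not part of this patch) of the IR pattern clang can now emit: one unmasked intrinsic call followed by an explicit select on the mask bits. As the tests below show, the mask, maskz, and mask2 forms differ only in the select's passthrough operand (the first source, zeroinitializer, or the index operand, respectively). The function name here is hypothetical.

  ; Hypothetical example: merge-masked 512-bit dword permute built from the
  ; new unmasked intrinsic plus an explicit select.
  declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>)

  define <16 x i32> @mask_permute_via_select(<16 x i32> %a, <16 x i32> %idx, <16 x i32> %b, i16 %mask) {
  entry:
    ; Unmasked two-source permute; operand order is (first source, indices, second source).
    %perm = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %a, <16 x i32> %idx, <16 x i32> %b)
    ; Merge masking expressed as an explicit select: masked-off lanes keep %a.
    %m = bitcast i16 %mask to <16 x i1>
    %res = select <16 x i1> %m, <16 x i32> %perm, <16 x i32> %a
    ret <16 x i32> %res
  }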

Added:
    llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsX86.td
    llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll

Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Mon May 28 20:26:30 2018
@@ -1012,326 +1012,370 @@ let TargetPrefix = "x86" in {  // All in
         GCCBuiltin<"__builtin_ia32_vpermilvarps256">,
         Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_d_128 :
-       GCCBuiltin<"__builtin_ia32_vpermi2vard128_mask">,
+  def int_x86_avx512_mask_vpermi2var_d_128 : // FIXME: Remove
         Intrinsic<[llvm_v4i32_ty],
         [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
         [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2vard256_mask">,
+  def int_x86_avx512_mask_vpermi2var_d_256 : // FIXME: Remove
           Intrinsic<[llvm_v8i32_ty],
           [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2vard512_mask">,
+  def int_x86_avx512_mask_vpermi2var_d_512 : // FIXME: Remove
           Intrinsic<[llvm_v16i32_ty],
           [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_hi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varhi128_mask">,
+  def int_x86_avx512_mask_vpermi2var_hi_128 : // FIXME: Remove
           Intrinsic<[llvm_v8i16_ty],
           [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_hi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varhi256_mask">,
+  def int_x86_avx512_mask_vpermi2var_hi_256 : // FIXME: Remove
           Intrinsic<[llvm_v16i16_ty],
           [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_hi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varhi512_mask">,
+  def int_x86_avx512_mask_vpermi2var_hi_512 : // FIXME: Remove
           Intrinsic<[llvm_v32i16_ty],
           [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_pd_128 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varpd128_mask">,
+  def int_x86_avx512_mask_vpermi2var_pd_128 : // FIXME: Remove
           Intrinsic<[llvm_v2f64_ty],
           [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_pd_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varpd256_mask">,
+  def int_x86_avx512_mask_vpermi2var_pd_256 : // FIXME: Remove
           Intrinsic<[llvm_v4f64_ty],
           [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_pd_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varpd512_mask">,
+  def int_x86_avx512_mask_vpermi2var_pd_512 : // FIXME: Remove
           Intrinsic<[llvm_v8f64_ty],
           [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_ps_128 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varps128_mask">,
+  def int_x86_avx512_mask_vpermi2var_ps_128 : // FIXME: Remove
           Intrinsic<[llvm_v4f32_ty],
           [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_ps_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varps256_mask">,
+  def int_x86_avx512_mask_vpermi2var_ps_256 : // FIXME: Remove
           Intrinsic<[llvm_v8f32_ty],
           [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_ps_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varps512_mask">,
+  def int_x86_avx512_mask_vpermi2var_ps_512 : // FIXME: Remove
           Intrinsic<[llvm_v16f32_ty],
           [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varq128_mask">,
+  def int_x86_avx512_mask_vpermi2var_q_128 : // FIXME: Remove
           Intrinsic<[llvm_v2i64_ty],
           [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varq256_mask">,
+  def int_x86_avx512_mask_vpermi2var_q_256 : // FIXME: Remove
           Intrinsic<[llvm_v4i64_ty],
           [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varq512_mask">,
+  def int_x86_avx512_mask_vpermi2var_q_512 : // FIXME: Remove
           Intrinsic<[llvm_v8i64_ty],
           [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_d_512:
-        GCCBuiltin<"__builtin_ia32_vpermt2vard512_mask">,
+  def int_x86_avx512_mask_vpermt2var_d_512: // FIXME: Remove
         Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                   llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_q_512:
-        GCCBuiltin<"__builtin_ia32_vpermt2varq512_mask">,
+  def int_x86_avx512_mask_vpermt2var_q_512: // FIXME: Remove
         Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                   llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_ps_512:
-        GCCBuiltin<"__builtin_ia32_vpermt2varps512_mask">,
+  def int_x86_avx512_mask_vpermt2var_ps_512: // FIXME: Remove
         Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty,
                   llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_pd_512:
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd512_mask">,
+  def int_x86_avx512_mask_vpermt2var_pd_512: // FIXME: Remove
         Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty,
                   llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2vard128_mask">,
+  def int_x86_avx512_mask_vpermt2var_d_128 : // FIXME: Remove
           Intrinsic<[llvm_v4i32_ty],
           [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2vard128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_d_128 : // FIXME: Remove
           Intrinsic<[llvm_v4i32_ty],
           [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2vard256_mask">,
+  def int_x86_avx512_mask_vpermt2var_d_256 : // FIXME: Remove
           Intrinsic<[llvm_v8i32_ty],
           [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2vard256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_d_256 : // FIXME: Remove
           Intrinsic<[llvm_v8i32_ty],
           [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2vard512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_d_512 : // FIXME: Remove
           Intrinsic<[llvm_v16i32_ty],
           [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_hi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi128_mask">,
+  def int_x86_avx512_mask_vpermt2var_hi_128 : // FIXME: Remove
           Intrinsic<[llvm_v8i16_ty],
           [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_hi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_hi_128 : // FIXME: Remove
           Intrinsic<[llvm_v8i16_ty],
           [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_hi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi256_mask">,
+  def int_x86_avx512_mask_vpermt2var_hi_256 : // FIXME: Remove
           Intrinsic<[llvm_v16i16_ty],
           [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_hi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_hi_256 : // FIXME: Remove
           Intrinsic<[llvm_v16i16_ty],
           [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_hi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi512_mask">,
+  def int_x86_avx512_mask_vpermt2var_hi_512 : // FIXME: Remove
           Intrinsic<[llvm_v32i16_ty],
           [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_hi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varhi512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_hi_512 : // FIXME: Remove
           Intrinsic<[llvm_v32i16_ty],
           [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_pd_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd128_mask">,
+  def int_x86_avx512_mask_vpermt2var_pd_128 : // FIXME: Remove
           Intrinsic<[llvm_v2f64_ty],
           [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_pd_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_pd_128 : // FIXME: Remove
           Intrinsic<[llvm_v2f64_ty],
           [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_pd_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd256_mask">,
+  def int_x86_avx512_mask_vpermt2var_pd_256 : // FIXME: Remove
           Intrinsic<[llvm_v4f64_ty],
           [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_pd_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_pd_256 : // FIXME: Remove
           Intrinsic<[llvm_v4f64_ty],
           [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_pd_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varpd512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_pd_512 : // FIXME: Remove
           Intrinsic<[llvm_v8f64_ty],
           [llvm_v8i64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_ps_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varps128_mask">,
+  def int_x86_avx512_mask_vpermt2var_ps_128 : // FIXME: Remove
           Intrinsic<[llvm_v4f32_ty],
           [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_ps_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varps128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_ps_128 : // FIXME: Remove
           Intrinsic<[llvm_v4f32_ty],
           [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_ps_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varps256_mask">,
+  def int_x86_avx512_mask_vpermt2var_ps_256 : // FIXME: Remove
           Intrinsic<[llvm_v8f32_ty],
           [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_ps_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varps256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_ps_256 : // FIXME: Remove
           Intrinsic<[llvm_v8f32_ty],
           [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_ps_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varps512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_ps_512 : // FIXME: Remove
           Intrinsic<[llvm_v16f32_ty],
           [llvm_v16i32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varq128_mask">,
+  def int_x86_avx512_mask_vpermt2var_q_128 : // FIXME: Remove
           Intrinsic<[llvm_v2i64_ty],
           [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varq128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_q_128 : // FIXME: Remove
           Intrinsic<[llvm_v2i64_ty],
           [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varq256_mask">,
+  def int_x86_avx512_mask_vpermt2var_q_256 : // FIXME: Remove
           Intrinsic<[llvm_v4i64_ty],
           [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varq256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_q_256 : // FIXME: Remove
           Intrinsic<[llvm_v4i64_ty],
           [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varq512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_q_512 : // FIXME: Remove
           Intrinsic<[llvm_v8i64_ty],
           [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_qi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varqi128_mask">,
+  def int_x86_avx512_mask_vpermi2var_qi_128 : // FIXME: Remove
           Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
           llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_qi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi128_mask">,
+  def int_x86_avx512_mask_vpermt2var_qi_128 : // FIXME: Remove
           Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
           llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_qi_128 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi128_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_qi_128 : // FIXME: Remove
           Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
           llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_qi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varqi256_mask">,
+  def int_x86_avx512_mask_vpermi2var_qi_256 : // FIXME: Remove
           Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
           llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_qi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi256_mask">,
+  def int_x86_avx512_mask_vpermt2var_qi_256 : // FIXME: Remove
           Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
           llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_qi_256 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi256_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_qi_256 : // FIXME: Remove
           Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
           llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermi2var_qi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermi2varqi512_mask">,
+  def int_x86_avx512_mask_vpermi2var_qi_512 : // FIXME: Remove
           Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
           llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_mask_vpermt2var_qi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi512_mask">,
+  def int_x86_avx512_mask_vpermt2var_qi_512 : // FIXME: Remove
           Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
           llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
           [IntrNoMem]>;
 
-  def int_x86_avx512_maskz_vpermt2var_qi_512 :
-        GCCBuiltin<"__builtin_ia32_vpermt2varqi512_maskz">,
+  def int_x86_avx512_maskz_vpermt2var_qi_512 : // FIXME: Remove
           Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
           llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
           [IntrNoMem]>;
 
+
+
+
+
+  def int_x86_avx512_vpermi2var_d_128 :
+       GCCBuiltin<"__builtin_ia32_vpermi2vard128">,
+       Intrinsic<[llvm_v4i32_ty],
+                 [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2vard256">,
+        Intrinsic<[llvm_v8i32_ty],
+                  [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2vard512">,
+        Intrinsic<[llvm_v16i32_ty],
+                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty],
+                  [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_hi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi128">,
+        Intrinsic<[llvm_v8i16_ty],
+                  [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_hi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi256">,
+        Intrinsic<[llvm_v16i16_ty],
+                  [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty],
+                  [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_hi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi512">,
+        Intrinsic<[llvm_v32i16_ty],
+                  [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty],
+                  [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_pd_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd128">,
+        Intrinsic<[llvm_v2f64_ty],
+                  [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_pd_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd256">,
+        Intrinsic<[llvm_v4f64_ty],
+                  [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_pd_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd512">,
+        Intrinsic<[llvm_v8f64_ty],
+                  [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_ps_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps128">,
+        Intrinsic<[llvm_v4f32_ty],
+                  [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_ps_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps256">,
+        Intrinsic<[llvm_v8f32_ty],
+                  [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_ps_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps512">,
+        Intrinsic<[llvm_v16f32_ty],
+                  [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty],
+                  [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq128">,
+        Intrinsic<[llvm_v2i64_ty],
+                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq256">,
+        Intrinsic<[llvm_v4i64_ty],
+                  [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq512">,
+        Intrinsic<[llvm_v8i64_ty],
+                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_qi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi128">,
+        Intrinsic<[llvm_v16i8_ty],
+                  [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_qi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi256">,
+        Intrinsic<[llvm_v32i8_ty],
+                  [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_vpermi2var_qi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi512">,
+        Intrinsic<[llvm_v64i8_ty],
+                  [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>;
+
   def int_x86_avx512_vpermilvar_pd_512 :
         GCCBuiltin<"__builtin_ia32_vpermilvarpd512">,
           Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8i64_ty],

Modified: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h Mon May 28 20:26:30 2018
@@ -1435,6 +1435,24 @@ static const IntrinsicData  IntrinsicsWi
   X86_INTRINSIC_DATA(avx512_vcvtss2si64, INTR_TYPE_2OP, X86ISD::CVTS2SI_RND, 0),
   X86_INTRINSIC_DATA(avx512_vcvtss2usi32, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
   X86_INTRINSIC_DATA(avx512_vcvtss2usi64, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_d_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_d_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_d_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_hi_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_hi_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_hi_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_pd_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_pd_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_pd_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_ps_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_ps_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_ps_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_q_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_q_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_q_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_qi_128, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_qi_256, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
+  X86_INTRINSIC_DATA(avx512_vpermi2var_qi_512, INTR_TYPE_3OP, X86ISD::VPERMV3, 0),
   X86_INTRINSIC_DATA(avx512_vpermilvar_pd_512, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
   X86_INTRINSIC_DATA(avx512_vpermilvar_ps_512, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
   X86_INTRINSIC_DATA(avx512_vpmadd52h_uq_128 , IFMA_OP, X86ISD::VPMADD52H, 0),

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -2478,5 +2478,338 @@ entry:
   ret <8 x i64> %2
 }
 
+declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <8 x i64> @test_mm512_mask2_permutex2var_epi32(<8 x i64> %__A, <8 x i64> %__I, i16 zeroext %__U, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <16 x i32>
+  %1 = bitcast <8 x i64> %__I to <16 x i32>
+  %2 = bitcast <8 x i64> %__B to <16 x i32>
+  %3 = tail call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> %1
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, <8 x double>)
+
+define <8 x double> @test_mm512_mask2_permutex2var_pd(<8 x double> %__A, <8 x i64> %__I, i8 zeroext %__U, <8 x double> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovapd %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovapd %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %__A, <8 x i64> %__I, <8 x double> %__B)
+  %1 = bitcast <8 x i64> %__I to <8 x double>
+  %2 = bitcast i8 %__U to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x double> %0, <8 x double> %1
+  ret <8 x double> %3
+}
+
+declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32>, <16 x float>)
+
+define <16 x float> @test_mm512_mask2_permutex2var_ps(<16 x float> %__A, <8 x i64> %__I, i16 zeroext %__U, <16 x float> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovaps %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovaps %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__I to <16 x i32>
+  %1 = tail call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %__A, <16 x i32> %0, <16 x float> %__B)
+  %2 = bitcast <8 x i64> %__I to <16 x float>
+  %3 = bitcast i16 %__U to <16 x i1>
+  %4 = select <16 x i1> %3, <16 x float> %1, <16 x float> %2
+  ret <16 x float> %4
+}
+
+declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>)
+
+define <8 x i64> @test_mm512_mask2_permutex2var_epi64(<8 x i64> %__A, <8 x i64> %__I, i8 zeroext %__U, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__I
+  ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_permutex2var_epi32(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <16 x i32>
+  %1 = bitcast <8 x i64> %__I to <16 x i32>
+  %2 = bitcast <8 x i64> %__B to <16 x i32>
+  %3 = tail call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast <16 x i32> %3 to <8 x i64>
+  ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_maskz_permutex2var_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <16 x i32>
+  %1 = bitcast <8 x i64> %__I to <16 x i32>
+  %2 = bitcast <8 x i64> %__B to <16 x i32>
+  %3 = tail call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> zeroinitializer
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_mask_permutex2var_epi32(<8 x i64> %__A, i16 zeroext %__U, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <16 x i32>
+  %1 = bitcast <8 x i64> %__I to <16 x i32>
+  %2 = bitcast <8 x i64> %__B to <16 x i32>
+  %3 = tail call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> %0
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x double> @test_mm512_permutex2var_pd(<8 x double> %__A, <8 x i64> %__I, <8 x double> %__B) {
+; X32-LABEL: test_mm512_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %__A, <8 x i64> %__I, <8 x double> %__B)
+  ret <8 x double> %0
+}
+
+define <8 x double> @test_mm512_mask_permutex2var_pd(<8 x double> %__A, i8 zeroext %__U, <8 x i64> %__I, <8 x double> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %__A, <8 x i64> %__I, <8 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_maskz_permutex2var_pd(i8 zeroext %__U, <8 x double> %__A, <8 x i64> %__I, <8 x double> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %__A, <8 x i64> %__I, <8 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
+  ret <8 x double> %2
+}
+
+define <16 x float> @test_mm512_permutex2var_ps(<16 x float> %__A, <8 x i64> %__I, <16 x float> %__B) {
+; X32-LABEL: test_mm512_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__I to <16 x i32>
+  %1 = tail call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %__A, <16 x i32> %0, <16 x float> %__B)
+  ret <16 x float> %1
+}
+
+define <16 x float> @test_mm512_mask_permutex2var_ps(<16 x float> %__A, i16 zeroext %__U, <8 x i64> %__I, <16 x float> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__I to <16 x i32>
+  %1 = tail call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %__A, <16 x i32> %0, <16 x float> %__B)
+  %2 = bitcast i16 %__U to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %__A
+  ret <16 x float> %3
+}
+
+define <16 x float> @test_mm512_maskz_permutex2var_ps(i16 zeroext %__U, <16 x float> %__A, <8 x i64> %__I, <16 x float> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__I to <16 x i32>
+  %1 = tail call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %__A, <16 x i32> %0, <16 x float> %__B)
+  %2 = bitcast i16 %__U to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
+  ret <16 x float> %3
+}
+
+define <8 x i64> @test_mm512_permutex2var_epi64(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B)
+  ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_permutex2var_epi64(<8 x i64> %__A, i8 zeroext %__U, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__A
+  ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_permutex2var_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+  ret <8 x i64> %2
+}
+
+
 !0 = !{i32 1}
 

Modified: llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -861,5 +861,97 @@ entry:
   ret <4 x i64> %3
 }
 
+define <8 x i64> @test_mm512_mask2_permutex2var_epi16(<8 x i64> %__A, <8 x i64> %__I, i32 %__U, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <32 x i16>
+  %1 = bitcast <8 x i64> %__I to <32 x i16>
+  %2 = bitcast <8 x i64> %__B to <32 x i16>
+  %3 = tail call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %1
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_permutex2var_epi16(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <32 x i16>
+  %1 = bitcast <8 x i64> %__I to <32 x i16>
+  %2 = bitcast <8 x i64> %__B to <32 x i16>
+  %3 = tail call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast <32 x i16> %3 to <8 x i64>
+  ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_mask_permutex2var_epi16(<8 x i64> %__A, i32 %__U, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <32 x i16>
+  %1 = bitcast <8 x i64> %__I to <32 x i16>
+  %2 = bitcast <8 x i64> %__B to <32 x i16>
+  %3 = tail call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %0
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_maskz_permutex2var_epi16(i32 %__U, <8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <32 x i16>
+  %1 = bitcast <8 x i64> %__I to <32 x i16>
+  %2 = bitcast <8 x i64> %__B to <32 x i16>
+  %3 = tail call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+declare <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>)
+
 !0 = !{i32 1}
 

Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -877,5 +877,191 @@ entry:
   ret <2 x i64> %3
 }
 
+define <2 x i64> @test_mm_mask2_permutex2var_epi16(<2 x i64> %__A, <2 x i64> %__I, i8 zeroext %__U, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vpermi2w %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermi2w %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <8 x i16>
+  %1 = bitcast <2 x i64> %__I to <8 x i16>
+  %2 = bitcast <2 x i64> %__B to <8 x i16>
+  %3 = tail call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %1
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_mask2_permutex2var_epi16(<4 x i64> %__A, <4 x i64> %__I, i16 zeroext %__U, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2w %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovdqa %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermi2w %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovdqa %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <16 x i16>
+  %1 = bitcast <4 x i64> %__I to <16 x i16>
+  %2 = bitcast <4 x i64> %__B to <16 x i16>
+  %3 = tail call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %1
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <2 x i64> @test_mm_permutex2var_epi16(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <8 x i16>
+  %1 = bitcast <2 x i64> %__I to <8 x i16>
+  %2 = bitcast <2 x i64> %__B to <8 x i16>
+  %3 = tail call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast <8 x i16> %3 to <2 x i64>
+  ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_permutex2var_epi16(<2 x i64> %__A, i8 zeroext %__U, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <8 x i16>
+  %1 = bitcast <2 x i64> %__I to <8 x i16>
+  %2 = bitcast <2 x i64> %__B to <8 x i16>
+  %3 = tail call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %0
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <2 x i64> @test_mm_maskz_permutex2var_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <8 x i16>
+  %1 = bitcast <2 x i64> %__I to <8 x i16>
+  %2 = bitcast <2 x i64> %__B to <8 x i16>
+  %3 = tail call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_permutex2var_epi16(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <16 x i16>
+  %1 = bitcast <4 x i64> %__I to <16 x i16>
+  %2 = bitcast <4 x i64> %__B to <16 x i16>
+  %3 = tail call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast <16 x i16> %3 to <4 x i64>
+  ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_permutex2var_epi16(<4 x i64> %__A, i16 zeroext %__U, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <16 x i16>
+  %1 = bitcast <4 x i64> %__I to <16 x i16>
+  %2 = bitcast <4 x i64> %__B to <16 x i16>
+  %3 = tail call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %0
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_permutex2var_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_epi16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_epi16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2w %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <16 x i16>
+  %1 = bitcast <4 x i64> %__I to <16 x i16>
+  %2 = bitcast <4 x i64> %__B to <16 x i16>
+  %3 = tail call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+declare <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>)
+
 !0 = !{i32 1}
 

Added: llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll?rev=333386&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vbmi-builtins.c
+
+define <8 x i64> @test_mm512_mask2_permutex2var_epi8(<8 x i64> %__A, <8 x i64> %__I, i64 %__U, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask2_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k1, %k0, %k1
+; X32-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask2_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <64 x i8>
+  %1 = bitcast <8 x i64> %__I to <64 x i8>
+  %2 = bitcast <8 x i64> %__B to <64 x i8>
+  %3 = tail call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %0, <64 x i8> %1, <64 x i8> %2)
+  %4 = bitcast i64 %__U to <64 x i1>
+  %5 = select <64 x i1> %4, <64 x i8> %3, <64 x i8> %1
+  %6 = bitcast <64 x i8> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_permutex2var_epi8(<8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <64 x i8>
+  %1 = bitcast <8 x i64> %__I to <64 x i8>
+  %2 = bitcast <8 x i64> %__B to <64 x i8>
+  %3 = tail call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %0, <64 x i8> %1, <64 x i8> %2)
+  %4 = bitcast <64 x i8> %3 to <8 x i64>
+  ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_mask_permutex2var_epi8(<8 x i64> %__A, i64 %__U, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k1, %k0, %k1
+; X32-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <64 x i8>
+  %1 = bitcast <8 x i64> %__I to <64 x i8>
+  %2 = bitcast <8 x i64> %__B to <64 x i8>
+  %3 = tail call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %0, <64 x i8> %1, <64 x i8> %2)
+  %4 = bitcast i64 %__U to <64 x i1>
+  %5 = select <64 x i1> %4, <64 x i8> %3, <64 x i8> %0
+  %6 = bitcast <64 x i8> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_maskz_permutex2var_epi8(i64 %__U, <8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k1, %k0, %k1
+; X32-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpermt2b %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__A to <64 x i8>
+  %1 = bitcast <8 x i64> %__I to <64 x i8>
+  %2 = bitcast <8 x i64> %__B to <64 x i8>
+  %3 = tail call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %0, <64 x i8> %1, <64 x i8> %2)
+  %4 = bitcast i64 %__U to <64 x i1>
+  %5 = select <64 x i1> %4, <64 x i8> %3, <64 x i8> zeroinitializer
+  %6 = bitcast <64 x i8> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+declare <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>)

Added: llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll?rev=333386&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -0,0 +1,188 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlvbmi-builtins.c
+
+define <2 x i64> @test_mm_mask2_permutex2var_epi8(<2 x i64> %__A, <2 x i64> %__I, i16 zeroext %__U, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2b %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermi2b %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <16 x i8>
+  %1 = bitcast <2 x i64> %__I to <16 x i8>
+  %2 = bitcast <2 x i64> %__B to <16 x i8>
+  %3 = tail call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %1
+  %6 = bitcast <16 x i8> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_mask2_permutex2var_epi8(<4 x i64> %__A, <4 x i64> %__I, i32 %__U, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermi2b %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovdqa %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermi2b %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovdqa %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <32 x i8>
+  %1 = bitcast <4 x i64> %__I to <32 x i8>
+  %2 = bitcast <4 x i64> %__B to <32 x i8>
+  %3 = tail call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %0, <32 x i8> %1, <32 x i8> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %1
+  %6 = bitcast <32 x i8> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <2 x i64> @test_mm_permutex2var_epi8(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <16 x i8>
+  %1 = bitcast <2 x i64> %__I to <16 x i8>
+  %2 = bitcast <2 x i64> %__B to <16 x i8>
+  %3 = tail call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  %4 = bitcast <16 x i8> %3 to <2 x i64>
+  ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_permutex2var_epi8(<2 x i64> %__A, i16 zeroext %__U, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <16 x i8>
+  %1 = bitcast <2 x i64> %__I to <16 x i8>
+  %2 = bitcast <2 x i64> %__B to <16 x i8>
+  %3 = tail call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %0
+  %6 = bitcast <16 x i8> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <2 x i64> @test_mm_maskz_permutex2var_epi8(i16 zeroext %__U, <2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2b %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <16 x i8>
+  %1 = bitcast <2 x i64> %__I to <16 x i8>
+  %2 = bitcast <2 x i64> %__B to <16 x i8>
+  %3 = tail call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> zeroinitializer
+  %6 = bitcast <16 x i8> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_permutex2var_epi8(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <32 x i8>
+  %1 = bitcast <4 x i64> %__I to <32 x i8>
+  %2 = bitcast <4 x i64> %__B to <32 x i8>
+  %3 = tail call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %0, <32 x i8> %1, <32 x i8> %2)
+  %4 = bitcast <32 x i8> %3 to <4 x i64>
+  ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_permutex2var_epi8(<4 x i64> %__A, i32 %__U, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <32 x i8>
+  %1 = bitcast <4 x i64> %__I to <32 x i8>
+  %2 = bitcast <4 x i64> %__B to <32 x i8>
+  %3 = tail call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %0, <32 x i8> %1, <32 x i8> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %0
+  %6 = bitcast <32 x i8> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_permutex2var_epi8(i32 %__U, <4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_epi8:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpermt2b %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <32 x i8>
+  %1 = bitcast <4 x i64> %__I to <32 x i8>
+  %2 = bitcast <4 x i64> %__B to <32 x i8>
+  %3 = tail call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %0, <32 x i8> %1, <32 x i8> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> zeroinitializer
+  %6 = bitcast <32 x i8> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+declare <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>)

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll?rev=333386&r1=333385&r2=333386&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll Mon May 28 20:26:30 2018
@@ -3804,6 +3804,685 @@ entry:
   ret <4 x i64> %2
 }
 
+define <2 x i64> @test_mm_mask2_permutex2var_epi32(<2 x i64> %__A, <2 x i64> %__I, i8 zeroext %__U, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2d %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2d %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <4 x i32>
+  %1 = bitcast <2 x i64> %__I to <4 x i32>
+  %2 = bitcast <2 x i64> %__B to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %1
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_mask2_permutex2var_epi32(<4 x i64> %__A, <4 x i64> %__I, i8 zeroext %__U, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2d %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovdqa %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2d %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovdqa %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <8 x i32>
+  %1 = bitcast <4 x i64> %__I to <8 x i32>
+  %2 = bitcast <4 x i64> %__B to <8 x i32>
+  %3 = tail call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %1
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <2 x double> @test_mm_mask2_permutex2var_pd(<2 x double> %__A, <2 x i64> %__I, i8 zeroext %__U, <2 x double> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2pd %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovapd %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2pd %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovapd %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %__A, <2 x i64> %__I, <2 x double> %__B)
+  %1 = bitcast <2 x i64> %__I to <2 x double>
+  %2 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %1
+  ret <2 x double> %3
+}
+
+define <4 x double> @test_mm256_mask2_permutex2var_pd(<4 x double> %__A, <4 x i64> %__I, i8 zeroext %__U, <4 x double> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2pd %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovapd %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2pd %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovapd %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %__A, <4 x i64> %__I, <4 x double> %__B)
+  %1 = bitcast <4 x i64> %__I to <4 x double>
+  %2 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %1
+  ret <4 x double> %3
+}
+
+define <4 x float> @test_mm_mask2_permutex2var_ps(<4 x float> %__A, <2 x i64> %__I, i8 zeroext %__U, <4 x float> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2ps %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovaps %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2ps %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__I to <4 x i32>
+  %1 = tail call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %__A, <4 x i32> %0, <4 x float> %__B)
+  %2 = bitcast <2 x i64> %__I to <4 x float>
+  %3 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = select <4 x i1> %extract.i, <4 x float> %1, <4 x float> %2
+  ret <4 x float> %4
+}
+
+define <8 x float> @test_mm256_mask2_permutex2var_ps(<8 x float> %__A, <4 x i64> %__I, i8 zeroext %__U, <8 x float> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2ps %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovaps %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2ps %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovaps %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__I to <8 x i32>
+  %1 = tail call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %__A, <8 x i32> %0, <8 x float> %__B)
+  %2 = bitcast <4 x i64> %__I to <8 x float>
+  %3 = bitcast i8 %__U to <8 x i1>
+  %4 = select <8 x i1> %3, <8 x float> %1, <8 x float> %2
+  ret <8 x float> %4
+}
+
+define <2 x i64> @test_mm_mask2_permutex2var_epi64(<2 x i64> %__A, <2 x i64> %__I, i8 zeroext %__U, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask2_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2q %xmm2, %xmm0, %xmm1 {%k1}
+; X32-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask2_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2q %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__I
+  ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_mask2_permutex2var_epi64(<4 x i64> %__A, <4 x i64> %__I, i8 zeroext %__U, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask2_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermi2q %ymm2, %ymm0, %ymm1 {%k1}
+; X32-NEXT:    vmovdqa %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask2_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2q %ymm2, %ymm0, %ymm1 {%k1}
+; X64-NEXT:    vmovdqa %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__I
+  ret <4 x i64> %2
+}
+
+define <2 x i64> @test_mm_permutex2var_epi32(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <4 x i32>
+  %1 = bitcast <2 x i64> %__I to <4 x i32>
+  %2 = bitcast <2 x i64> %__B to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast <4 x i32> %3 to <2 x i64>
+  ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_permutex2var_epi32(<2 x i64> %__A, i8 zeroext %__U, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <4 x i32>
+  %1 = bitcast <2 x i64> %__I to <4 x i32>
+  %2 = bitcast <2 x i64> %__B to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %0
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <2 x i64> @test_mm_maskz_permutex2var_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__A to <4 x i32>
+  %1 = bitcast <2 x i64> %__I to <4 x i32>
+  %2 = bitcast <2 x i64> %__B to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> zeroinitializer
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
+
+define <4 x i64> @test_mm256_permutex2var_epi32(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <8 x i32>
+  %1 = bitcast <4 x i64> %__I to <8 x i32>
+  %2 = bitcast <4 x i64> %__B to <8 x i32>
+  %3 = tail call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast <8 x i32> %3 to <4 x i64>
+  ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_permutex2var_epi32(<4 x i64> %__A, i8 zeroext %__U, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <8 x i32>
+  %1 = bitcast <4 x i64> %__I to <8 x i32>
+  %2 = bitcast <4 x i64> %__B to <8 x i32>
+  %3 = tail call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %0
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_permutex2var_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_epi32:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_epi32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__A to <8 x i32>
+  %1 = bitcast <4 x i64> %__I to <8 x i32>
+  %2 = bitcast <4 x i64> %__B to <8 x i32>
+  %3 = tail call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> zeroinitializer
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <2 x double> @test_mm_permutex2var_pd(<2 x double> %__A, <2 x i64> %__I, <2 x double> %__B) {
+; X32-LABEL: test_mm_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %__A, <2 x i64> %__I, <2 x double> %__B)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_mm_mask_permutex2var_pd(<2 x double> %__A, i8 zeroext %__U, <2 x i64> %__I, <2 x double> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %__A, <2 x i64> %__I, <2 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_maskz_permutex2var_pd(i8 zeroext %__U, <2 x double> %__A, <2 x i64> %__I, <2 x double> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %__A, <2 x i64> %__I, <2 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
+  ret <2 x double> %2
+}
+
+define <4 x double> @test_mm256_permutex2var_pd(<4 x double> %__A, <4 x i64> %__I, <4 x double> %__B) {
+; X32-LABEL: test_mm256_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %__A, <4 x i64> %__I, <4 x double> %__B)
+  ret <4 x double> %0
+}
+
+define <4 x double> @test_mm256_mask_permutex2var_pd(<4 x double> %__A, i8 zeroext %__U, <4 x i64> %__I, <4 x double> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %__A, <4 x i64> %__I, <4 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
+  ret <4 x double> %2
+}
+
+define <4 x double> @test_mm256_maskz_permutex2var_pd(i8 zeroext %__U, <4 x double> %__A, <4 x i64> %__I, <4 x double> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_pd:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_pd:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2pd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %__A, <4 x i64> %__I, <4 x double> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
+  ret <4 x double> %2
+}
+
+define <4 x float> @test_mm_permutex2var_ps(<4 x float> %__A, <2 x i64> %__I, <4 x float> %__B) {
+; X32-LABEL: test_mm_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__I to <4 x i32>
+  %1 = tail call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %__A, <4 x i32> %0, <4 x float> %__B)
+  ret <4 x float> %1
+}
+
+define <4 x float> @test_mm_mask_permutex2var_ps(<4 x float> %__A, i8 zeroext %__U, <2 x i64> %__I, <4 x float> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__I to <4 x i32>
+  %1 = tail call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %__A, <4 x i32> %0, <4 x float> %__B)
+  %2 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract.i, <4 x float> %1, <4 x float> %__A
+  ret <4 x float> %3
+}
+
+define <4 x float> @test_mm_maskz_permutex2var_ps(i8 zeroext %__U, <4 x float> %__A, <2 x i64> %__I, <4 x float> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__I to <4 x i32>
+  %1 = tail call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %__A, <4 x i32> %0, <4 x float> %__B)
+  %2 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract.i, <4 x float> %1, <4 x float> zeroinitializer
+  ret <4 x float> %3
+}
+
+define <8 x float> @test_mm256_permutex2var_ps(<8 x float> %__A, <4 x i64> %__I, <8 x float> %__B) {
+; X32-LABEL: test_mm256_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__I to <8 x i32>
+  %1 = tail call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %__A, <8 x i32> %0, <8 x float> %__B)
+  ret <8 x float> %1
+}
+
+define <8 x float> @test_mm256_mask_permutex2var_ps(<8 x float> %__A, i8 zeroext %__U, <4 x i64> %__I, <8 x float> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__I to <8 x i32>
+  %1 = tail call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %__A, <8 x i32> %0, <8 x float> %__B)
+  %2 = bitcast i8 %__U to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> %__A
+  ret <8 x float> %3
+}
+
+define <8 x float> @test_mm256_maskz_permutex2var_ps(i8 zeroext %__U, <8 x float> %__A, <4 x i64> %__I, <8 x float> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_ps:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_ps:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__I to <8 x i32>
+  %1 = tail call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %__A, <8 x i32> %0, <8 x float> %__B)
+  %2 = bitcast i8 %__U to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> zeroinitializer
+  ret <8 x float> %3
+}
+
+define <2 x i64> @test_mm_permutex2var_epi64(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B)
+  ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_permutex2var_epi64(<2 x i64> %__A, i8 zeroext %__U, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__A
+  ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_permutex2var_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> %__A, <2 x i64> %__I, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+  ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_permutex2var_epi64(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B)
+  ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_permutex2var_epi64(<4 x i64> %__A, i8 zeroext %__U, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__A
+  ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_permutex2var_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_permutex2var_epi64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex2var_epi64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> %__A, <4 x i64> %__I, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+  ret <4 x i64> %2
+}
+
+
 declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2dq.128(<2 x double>, <4 x i32>, i8)
 declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>)
 declare <4 x float> @llvm.x86.avx512.mask.cvtpd2ps(<2 x double>, <4 x float>, i8)
@@ -3823,5 +4502,13 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2
 declare <4 x i32> @llvm.x86.avx512.mask.cvttps2udq.128(<4 x float>, <4 x i32>, i8)
 declare <8 x i32> @llvm.x86.avx512.mask.cvttps2udq.256(<8 x float>, <8 x i32>, i8)
 declare <8 x i16> @llvm.x86.avx512.mask.pmov.dw.256(<8 x i32>, <8 x i16>, i8)
+declare <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double>, <2 x i64>, <2 x double>)
+declare <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double>, <4 x i64>, <4 x double>)
+declare <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float>, <4 x i32>, <4 x float>)
+declare <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float>, <8 x i32>, <8 x float>)
+declare <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64>, <4 x i64>, <4 x i64>)
 
 !0 = !{i32 1}
