[llvm] r333794 - [X86] Add fast-isel tests for avx512vbmi2 instructions.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 1 14:59:22 PDT 2018
Author: ctopper
Date: Fri Jun 1 14:59:22 2018
New Revision: 333794
URL: http://llvm.org/viewvc/llvm-project?rev=333794&view=rev
Log:
[X86] Add fast-isel tests for avx512vbmi2 instructions.
Added:
llvm/trunk/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
Modified:
llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
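
These tests exercise, through fast-isel, the IR that clang emits for the
AVX512-VBMI2 compress/expand and funnel-shift intrinsics. As a rough
illustration (not part of the commit), the 512-bit test functions below
correspond to builtins-level C along these lines; the demo_* wrapper names
are hypothetical, and <immintrin.h> plus an -mavx512vbmi2 compiler are
assumed:

    #include <immintrin.h>

    __m512i demo_compress(__m512i src, __mmask32 k, __m512i d) {
        // vpcompressw: pack the words of d selected by k into contiguous
        // low elements; unselected result elements come from src.
        return _mm512_mask_compress_epi16(src, k, d);
    }

    __m512i demo_expand(__mmask32 k, __m512i d) {
        // vpexpandw with zero-masking: place successive words of d at the
        // element positions set in k, zeroing the rest.
        return _mm512_maskz_expand_epi16(k, d);
    }

    __m512i demo_shldi(__m512i a, __m512i b) {
        // vpshldq: concatenate each 64-bit lane pair (a high, b low),
        // shift left by the immediate, and keep the high 64 bits.
        return _mm512_shldi_epi64(a, b, 31);
    }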
Added: llvm/trunk/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll?rev=333794&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll Fri Jun 1 14:59:22 2018
@@ -0,0 +1,956 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vbmi2-builtins.c
+
+define <8 x i64> @test_mm512_mask_compress_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressw %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <32 x i16>
+ %1 = bitcast <8 x i64> %__S to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16> %0, <32 x i16> %1, i32 %__U)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_maskz_compress_epi16(i32 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_maskz_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressw %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16> %0, <32 x i16> zeroinitializer, i32 %__U)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_mask_compress_epi8(<8 x i64> %__S, i64 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpcompressb %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpcompressb %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <64 x i8>
+ %1 = bitcast <8 x i64> %__S to <64 x i8>
+ %2 = tail call <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8> %0, <64 x i8> %1, i64 %__U)
+ %3 = bitcast <64 x i8> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_maskz_compress_epi8(i64 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_maskz_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpcompressb %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpcompressb %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <64 x i8>
+ %1 = tail call <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8> %0, <64 x i8> zeroinitializer, i64 %__U)
+ %2 = bitcast <64 x i8> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define void @test_mm512_mask_compressstoreu_epi16(i8* %__P, i32 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_compressstoreu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcompressw %zmm0, (%eax) {%k1}
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_compressstoreu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %esi, %k1
+; X64-NEXT: vpcompressw %zmm0, (%rdi) {%k1}
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <32 x i16>
+ tail call void @llvm.x86.avx512.mask.compress.store.w.512(i8* %__P, <32 x i16> %0, i32 %__U)
+ ret void
+}
+
+define void @test_mm512_mask_compressstoreu_epi8(i8* %__P, i64 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_compressstoreu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpcompressb %zmm0, (%eax) {%k1}
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_compressstoreu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rsi, %k1
+; X64-NEXT: vpcompressb %zmm0, (%rdi) {%k1}
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <64 x i8>
+ tail call void @llvm.x86.avx512.mask.compress.store.b.512(i8* %__P, <64 x i8> %0, i64 %__U)
+ ret void
+}
+
+define <8 x i64> @test_mm512_mask_expand_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <32 x i16>
+ %1 = bitcast <8 x i64> %__S to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16> %0, <32 x i16> %1, i32 %__U)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_maskz_expand_epi16(i32 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_maskz_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16> %0, <32 x i16> zeroinitializer, i32 %__U)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_mask_expand_epi8(<8 x i64> %__S, i64 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_mask_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpexpandb %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpexpandb %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <64 x i8>
+ %1 = bitcast <8 x i64> %__S to <64 x i8>
+ %2 = tail call <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8> %0, <64 x i8> %1, i64 %__U)
+ %3 = bitcast <64 x i8> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_maskz_expand_epi8(i64 %__U, <8 x i64> %__D) {
+; X32-LABEL: test_mm512_maskz_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpexpandb %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpexpandb %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__D to <64 x i8>
+ %1 = tail call <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8> %0, <64 x i8> zeroinitializer, i64 %__U)
+ %2 = bitcast <64 x i8> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_mask_expandloadu_epi16(<8 x i64> %__S, i32 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm512_mask_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw (%eax), %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw (%rsi), %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8* %__P, <32 x i16> %0, i32 %__U)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_expandloadu_epi16(i32 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm512_maskz_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandw (%eax), %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandw (%rsi), %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8* %__P, <32 x i16> zeroinitializer, i32 %__U)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define <8 x i64> @test_mm512_mask_expandloadu_epi8(<8 x i64> %__S, i64 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm512_mask_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpexpandb (%eax), %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpexpandb (%rsi), %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <64 x i8>
+ %1 = tail call <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8* %__P, <64 x i8> %0, i64 %__U)
+ %2 = bitcast <64 x i8> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_expandloadu_epi8(i64 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm512_maskz_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kunpckdq %k1, %k0, %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandb (%eax), %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovq %rdi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandb (%rsi), %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8* %__P, <64 x i8> zeroinitializer, i64 %__U)
+ %1 = bitcast <64 x i8> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define <8 x i64> @test_mm512_mask_shldi_epi64(<8 x i64> %__S, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 127, <8 x i64> %__S, i8 %__U)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)
+
+define <8 x i64> @test_mm512_maskz_shldi_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 63, <8 x i64> zeroinitializer, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_shldi_epi64(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldq $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 31, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_shldi_epi32(<8 x i64> %__S, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = bitcast <8 x i64> %__S to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 127, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)
+
+define <8 x i64> @test_mm512_maskz_shldi_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 63, <16 x i32> zeroinitializer, i16 %__U)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_shldi_epi32(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldd $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 31, <16 x i32> zeroinitializer, i16 -1)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_shldi_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = bitcast <8 x i64> %__S to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 127, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16>, <32 x i16>, i32, <32 x i16>, i32)
+
+define <8 x i64> @test_mm512_maskz_shldi_epi16(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 63, <32 x i16> zeroinitializer, i32 %__U)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_shldi_epi16(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldw $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 31, <32 x i16> zeroinitializer, i32 -1)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_shrdi_epi64(<8 x i64> %__S, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 127, <8 x i64> %__S, i8 %__U)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)
+
+define <8 x i64> @test_mm512_maskz_shrdi_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 63, <8 x i64> zeroinitializer, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_shrdi_epi64(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdq $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 31, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_shrdi_epi32(<8 x i64> %__S, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = bitcast <8 x i64> %__S to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 127, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)
+
+define <8 x i64> @test_mm512_maskz_shrdi_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 63, <16 x i32> zeroinitializer, i16 %__U)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_shrdi_epi32(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdd $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 31, <16 x i32> zeroinitializer, i16 -1)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_shrdi_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = bitcast <8 x i64> %__S to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 127, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16>, <32 x i16>, i32, <32 x i16>, i32)
+
+define <8 x i64> @test_mm512_maskz_shrdi_epi16(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 63, <32 x i16> zeroinitializer, i32 %__U)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_shrdi_epi16(<8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdw $31, %zmm1, %zmm0, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <32 x i16>
+ %1 = bitcast <8 x i64> %__B to <32 x i16>
+ %2 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 31, <32 x i16> zeroinitializer, i32 -1)
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_shldv_epi64(<8 x i64> %__S, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_maskz_shldv_epi64(i8 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_shldv_epi64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvq %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 -1)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_shldv_epi32(<8 x i64> %__S, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvd %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_maskz_shldv_epi32(i16 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_shldv_epi32(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvd %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 -1)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_mask_shldv_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvw %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_maskz_shldv_epi16(i32 %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_shldv_epi16(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shldv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvw %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 -1)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_mask_shrdv_epi64(<8 x i64> %__S, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_maskz_shrdv_epi64(i8 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_shrdv_epi64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 -1)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_shrdv_epi32(<8 x i64> %__S, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_maskz_shrdv_epi32(i16 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_shrdv_epi32(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = bitcast <8 x i64> %__A to <16 x i32>
+ %2 = bitcast <8 x i64> %__B to <16 x i32>
+ %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 -1)
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_mask_shrdv_epi16(<8 x i64> %__S, i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_mask_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_maskz_shrdv_epi16(i32 %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; X32-LABEL: test_mm512_maskz_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_shrdv_epi16(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
+; ALL-LABEL: test_mm512_shrdv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = bitcast <8 x i64> %__A to <32 x i16>
+ %2 = bitcast <8 x i64> %__B to <32 x i16>
+ %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 -1)
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16>, <32 x i16>, i32)
+declare <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8>, <64 x i8>, i64)
+declare void @llvm.x86.avx512.mask.compress.store.w.512(i8*, <32 x i16>, i32)
+declare void @llvm.x86.avx512.mask.compress.store.b.512(i8*, <64 x i8>, i64)
+declare <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16>, <32 x i16>, i32)
+declare <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8>, <64 x i8>, i64)
+declare <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8*, <32 x i16>, i32)
+declare <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8*, <64 x i8>, i64)
+declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
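
The avx512vbmi2vl file below covers the same operations in their 128-bit and
256-bit AVX512VL forms. A correspondingly hedged C sketch (again with
hypothetical demo_* names, assuming <immintrin.h> and
-mavx512vbmi2 -mavx512vl):

    #include <immintrin.h>

    __m128i demo_compress128(__m128i src, __mmask8 k, __m128i d) {
        // 128-bit vpcompressw: the mask narrows to 8 bits for 8 words.
        return _mm_mask_compress_epi16(src, k, d);
    }

    __m256i demo_shldi256(__m256i a, __m256i b) {
        // 256-bit vpshldq with the same immediate-shift semantics.
        return _mm256_shldi_epi64(a, b, 31);
    }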
Added: llvm/trunk/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll?rev=333794&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll Fri Jun 1 14:59:22 2018
@@ -0,0 +1,1905 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlvbmi2-builtins.c
+
+define <2 x i64> @test_mm_mask_compress_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpcompressw %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <8 x i16>
+ %1 = bitcast <2 x i64> %__S to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %0, <8 x i16> %1, i8 %__U)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_maskz_compress_epi16(i8 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_maskz_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpcompressw %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %0, <8 x i16> zeroinitializer, i8 %__U)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_mask_compress_epi8(<2 x i64> %__S, i16 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressb %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressb %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <16 x i8>
+ %1 = bitcast <2 x i64> %__S to <16 x i8>
+ %2 = tail call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %0, <16 x i8> %1, i16 %__U)
+ %3 = bitcast <16 x i8> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_maskz_compress_epi8(i16 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_maskz_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <16 x i8>
+ %1 = tail call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %0, <16 x i8> zeroinitializer, i16 %__U)
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define void @test_mm_mask_compressstoreu_epi16(i8* %__P, i8 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_compressstoreu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpcompressw %xmm0, (%ecx) {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_compressstoreu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %esi, %k1
+; X64-NEXT: vpcompressw %xmm0, (%rdi) {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <8 x i16>
+ tail call void @llvm.x86.avx512.mask.compress.store.w.128(i8* %__P, <8 x i16> %0, i8 %__U)
+ ret void
+}
+
+define void @test_mm_mask_compressstoreu_epi8(i8* %__P, i16 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_compressstoreu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcompressb %xmm0, (%eax) {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_compressstoreu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %esi, %k1
+; X64-NEXT: vpcompressb %xmm0, (%rdi) {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <16 x i8>
+ tail call void @llvm.x86.avx512.mask.compress.store.b.128(i8* %__P, <16 x i8> %0, i16 %__U)
+ ret void
+}
+
+define <2 x i64> @test_mm_mask_expand_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpexpandw %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <8 x i16>
+ %1 = bitcast <2 x i64> %__S to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %0, <8 x i16> %1, i8 %__U)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_maskz_expand_epi16(i8 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_maskz_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpexpandw %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %0, <8 x i16> zeroinitializer, i8 %__U)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_mask_expand_epi8(<2 x i64> %__S, i16 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_mask_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <16 x i8>
+ %1 = bitcast <2 x i64> %__S to <16 x i8>
+ %2 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %0, <16 x i8> %1, i16 %__U)
+ %3 = bitcast <16 x i8> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_maskz_expand_epi8(i16 zeroext %__U, <2 x i64> %__D) {
+; X32-LABEL: test_mm_maskz_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__D to <16 x i8>
+ %1 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %0, <16 x i8> zeroinitializer, i16 %__U)
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_mask_expandloadu_epi16(<2 x i64> %__S, i8 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm_mask_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT: kmovd %ecx, %k1
+; X32-NEXT: vpexpandw (%eax), %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw (%rsi), %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8* %__P, <8 x i16> %0, i8 %__U)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_expandloadu_epi16(i8 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm_maskz_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT: kmovd %ecx, %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandw (%eax), %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandw (%rsi), %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8* %__P, <8 x i16> zeroinitializer, i8 %__U)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define <2 x i64> @test_mm_mask_expandloadu_epi8(<2 x i64> %__S, i16 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm_mask_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb (%eax), %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb (%rsi), %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <16 x i8>
+ %1 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8* %__P, <16 x i8> %0, i16 %__U)
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_expandloadu_epi8(i16 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm_maskz_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandb (%eax), %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandb (%rsi), %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8* %__P, <16 x i8> zeroinitializer, i16 %__U)
+ %1 = bitcast <16 x i8> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define <4 x i64> @test_mm256_mask_compress_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressw %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <16 x i16>
+ %1 = bitcast <4 x i64> %__S to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16> %0, <16 x i16> %1, i16 %__U)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_maskz_compress_epi16(i16 zeroext %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_maskz_compress_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressw %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_compress_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressw %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16> %0, <16 x i16> zeroinitializer, i16 %__U)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_mask_compress_epi8(<4 x i64> %__S, i32 %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressb %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressb %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <32 x i8>
+ %1 = bitcast <4 x i64> %__S to <32 x i8>
+ %2 = tail call <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %0, <32 x i8> %1, i32 %__U)
+ %3 = bitcast <32 x i8> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_maskz_compress_epi8(i32 %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_maskz_compress_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpcompressb %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_compress_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpcompressb %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <32 x i8>
+ %1 = tail call <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %0, <32 x i8> zeroinitializer, i32 %__U)
+ %2 = bitcast <32 x i8> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define void @test_mm256_mask_compressstoreu_epi16(i8* %__P, i16 zeroext %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_compressstoreu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcompressw %ymm0, (%eax) {%k1}
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_compressstoreu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %esi, %k1
+; X64-NEXT: vpcompressw %ymm0, (%rdi) {%k1}
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <16 x i16>
+ tail call void @llvm.x86.avx512.mask.compress.store.w.256(i8* %__P, <16 x i16> %0, i16 %__U)
+ ret void
+}
+
+define void @test_mm256_mask_compressstoreu_epi8(i8* %__P, i32 %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_compressstoreu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcompressb %ymm0, (%eax) {%k1}
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_compressstoreu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %esi, %k1
+; X64-NEXT: vpcompressb %ymm0, (%rdi) {%k1}
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <32 x i8>
+ tail call void @llvm.x86.avx512.mask.compress.store.b.256(i8* %__P, <32 x i8> %0, i32 %__U)
+ ret void
+}
+
+define <4 x i64> @test_mm256_mask_expand_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <16 x i16>
+ %1 = bitcast <4 x i64> %__S to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16> %0, <16 x i16> %1, i16 %__U)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_maskz_expand_epi16(i16 zeroext %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_maskz_expand_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_expand_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16> %0, <16 x i16> zeroinitializer, i16 %__U)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_mask_expand_epi8(<4 x i64> %__S, i32 %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_mask_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <32 x i8>
+ %1 = bitcast <4 x i64> %__S to <32 x i8>
+ %2 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8> %0, <32 x i8> %1, i32 %__U)
+ %3 = bitcast <32 x i8> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_maskz_expand_epi8(i32 %__U, <4 x i64> %__D) {
+; X32-LABEL: test_mm256_maskz_expand_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_expand_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__D to <32 x i8>
+ %1 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8> %0, <32 x i8> zeroinitializer, i32 %__U)
+ %2 = bitcast <32 x i8> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_mask_expandloadu_epi16(<4 x i64> %__S, i16 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm256_mask_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandw (%eax), %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandw (%rsi), %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8* %__P, <16 x i16> %0, i16 %__U)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_expandloadu_epi16(i16 zeroext %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm256_maskz_expandloadu_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandw (%eax), %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_expandloadu_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandw (%rsi), %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8* %__P, <16 x i16> zeroinitializer, i16 %__U)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define <4 x i64> @test_mm256_mask_expandloadu_epi8(<4 x i64> %__S, i32 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm256_mask_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpexpandb (%eax), %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpexpandb (%rsi), %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <32 x i8>
+ %1 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8* %__P, <32 x i8> %0, i32 %__U)
+ %2 = bitcast <32 x i8> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_expandloadu_epi8(i32 %__U, i8* readonly %__P) {
+; X32-LABEL: test_mm256_maskz_expandloadu_epi8:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X32-NEXT: vpexpandb (%eax), %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_expandloadu_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpexpandb (%rsi), %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8* %__P, <32 x i8> zeroinitializer, i32 %__U)
+ %1 = bitcast <32 x i8> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define <4 x i64> @test_mm256_mask_shldi_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127, <4 x i64> %__S, i8 %__U)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64>, <4 x i64>, i32, <4 x i64>, i8)
+
+define <4 x i64> @test_mm256_maskz_shldi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63, <4 x i64> zeroinitializer, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_shldi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldq $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31, <4 x i64> zeroinitializer, i8 -1)
+ ret <4 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_shldi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127, <2 x i64> %__S, i8 %__U)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64>, <2 x i64>, i32, <2 x i64>, i8)
+
+define <2 x i64> @test_mm_maskz_shldi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63, <2 x i64> zeroinitializer, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_shldi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldq $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31, <2 x i64> zeroinitializer, i8 -1)
+ ret <2 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_shldi_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = bitcast <4 x i64> %__S to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 127, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32>, <8 x i32>, i32, <8 x i32>, i8)
+
+define <4 x i64> @test_mm256_maskz_shldi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 63, <8 x i32> zeroinitializer, i8 %__U)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_shldi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldd $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 31, <8 x i32> zeroinitializer, i8 -1)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_shldi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = bitcast <2 x i64> %__S to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 127, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32>, <4 x i32>, i32, <4 x i32>, i8)
+
+define <2 x i64> @test_mm_maskz_shldi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 63, <4 x i32> zeroinitializer, i8 %__U)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_shldi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldd $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 31, <4 x i32> zeroinitializer, i8 -1)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_shldi_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = bitcast <4 x i64> %__S to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 127, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16>, <16 x i16>, i32, <16 x i16>, i16)
+
+define <4 x i64> @test_mm256_maskz_shldi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 63, <16 x i16> zeroinitializer, i16 %__U)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_shldi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldw $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 31, <16 x i16> zeroinitializer, i16 -1)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_shldi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = bitcast <2 x i64> %__S to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 127, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16>, <8 x i16>, i32, <8 x i16>, i8)
+
+define <2 x i64> @test_mm_maskz_shldi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 63, <8 x i16> zeroinitializer, i8 %__U)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_shldi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldw $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 31, <8 x i16> zeroinitializer, i8 -1)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_shrdi_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127, <4 x i64> %__S, i8 %__U)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64>, <4 x i64>, i32, <4 x i64>, i8)
+
+define <4 x i64> @test_mm256_maskz_shrdi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63, <4 x i64> zeroinitializer, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_shrdi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdq $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31, <4 x i64> zeroinitializer, i8 -1)
+ ret <4 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_shrdi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127, <2 x i64> %__S, i8 %__U)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64>, <2 x i64>, i32, <2 x i64>, i8)
+
+define <2 x i64> @test_mm_maskz_shrdi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdi_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdi_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63, <2 x i64> zeroinitializer, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_shrdi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdi_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdq $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31, <2 x i64> zeroinitializer, i8 -1)
+ ret <2 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_shrdi_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = bitcast <4 x i64> %__S to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 127, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32>, <8 x i32>, i32, <8 x i32>, i8)
+
+define <4 x i64> @test_mm256_maskz_shrdi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 63, <8 x i32> zeroinitializer, i8 %__U)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_shrdi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdd $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 31, <8 x i32> zeroinitializer, i8 -1)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_shrdi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = bitcast <2 x i64> %__S to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 127, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32>, <4 x i32>, i32, <4 x i32>, i8)
+
+define <2 x i64> @test_mm_maskz_shrdi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdi_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdi_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 63, <4 x i32> zeroinitializer, i8 %__U)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_shrdi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdi_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdd $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 31, <4 x i32> zeroinitializer, i8 -1)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_shrdi_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = bitcast <4 x i64> %__S to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 127, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16>, <16 x i16>, i32, <16 x i16>, i16)
+
+define <4 x i64> @test_mm256_maskz_shrdi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 63, <16 x i16> zeroinitializer, i16 %__U)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_shrdi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdw $31, %ymm1, %ymm0, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <16 x i16>
+ %1 = bitcast <4 x i64> %__B to <16 x i16>
+ %2 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 31, <16 x i16> zeroinitializer, i16 -1)
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_shrdi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = bitcast <2 x i64> %__S to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 127, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16>, <8 x i16>, i32, <8 x i16>, i8)
+
+define <2 x i64> @test_mm_maskz_shrdi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdi_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdi_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 63, <8 x i16> zeroinitializer, i8 %__U)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_shrdi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdi_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdw $31, %xmm1, %xmm0, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <8 x i16>
+ %1 = bitcast <2 x i64> %__B to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 31, <8 x i16> zeroinitializer, i8 -1)
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_shldv_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_maskz_shldv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_shldv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvq %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+ ret <4 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_shldv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_maskz_shldv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_shldv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvq %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+ ret <2 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_shldv_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvd %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_maskz_shldv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_shldv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvd %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_shldv_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvd %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_maskz_shldv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_shldv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvd %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_shldv_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvw %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_maskz_shldv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_shldv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shldv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvw %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_shldv_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvw %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_maskz_shldv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shldv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shldv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_shldv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shldv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshldvw %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_shrdv_epi64(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_maskz_shrdv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_shrdv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+ ret <4 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_shrdv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_maskz_shrdv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdv_epi64:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_shrdv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdv_epi64:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+ ret <2 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_shrdv_epi32(<4 x i64> %__S, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_maskz_shrdv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_shrdv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = bitcast <4 x i64> %__A to <8 x i32>
+ %2 = bitcast <4 x i64> %__B to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_shrdv_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_maskz_shrdv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdv_epi32:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_shrdv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdv_epi32:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = bitcast <2 x i64> %__A to <4 x i32>
+ %2 = bitcast <2 x i64> %__B to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <4 x i64> @test_mm256_mask_shrdv_epi16(<4 x i64> %__S, i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_mask_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_maskz_shrdv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; X32-LABEL: test_mm256_maskz_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_mm256_shrdv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
+; ALL-LABEL: test_mm256_shrdv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = bitcast <4 x i64> %__A to <16 x i16>
+ %2 = bitcast <4 x i64> %__B to <16 x i16>
+ %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_mask_shrdv_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_mask_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_maskz_shrdv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; X32-LABEL: test_mm_maskz_shrdv_epi16:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovd %eax, %k1
+; X32-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_shrdv_epi16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1
+; X64-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <2 x i64> @test_mm_shrdv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
+; ALL-LABEL: test_mm_shrdv_epi16:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0
+; ALL-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = bitcast <2 x i64> %__A to <8 x i16>
+ %2 = bitcast <2 x i64> %__B to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16>, <8 x i16>, i8)
+declare <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8>, <16 x i8>, i16)
+declare void @llvm.x86.avx512.mask.compress.store.w.128(i8*, <8 x i16>, i8)
+declare void @llvm.x86.avx512.mask.compress.store.b.128(i8*, <16 x i8>, i16)
+declare <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16>, <8 x i16>, i8)
+declare <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8>, <16 x i8>, i16)
+declare <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8*, <8 x i16>, i8)
+declare <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8*, <16 x i8>, i16)
+declare <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16>, <16 x i16>, i16)
+declare <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8>, <32 x i8>, i32)
+declare void @llvm.x86.avx512.mask.compress.store.w.256(i8*, <16 x i16>, i16)
+declare void @llvm.x86.avx512.mask.compress.store.b.256(i8*, <32 x i8>, i32)
+declare <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16>, <16 x i16>, i16)
+declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32)
+declare <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8*, <16 x i16>, i16)
+declare <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8*, <32 x i8>, i32)
+declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
Modified: llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll?rev=333794&r1=333793&r2=333794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll Fri Jun 1 14:59:22 2018
@@ -2,7 +2,7 @@
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=X64
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlbw-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlvbmi-builtins.c
define <2 x i64> @test_mm_mask2_permutex2var_epi8(<2 x i64> %__A, <2 x i64> %__I, i16 zeroext %__U, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask2_permutex2var_epi8: