[llvm] r274503 - [X86][AVX512] Added VPERMPD/VPERMQ intrinsics fast-isel generic IR tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 4 06:43:10 PDT 2016


Author: rksimon
Date: Mon Jul  4 08:43:10 2016
New Revision: 274503

URL: http://llvm.org/viewvc/llvm-project?rev=274503&view=rev
Log:
[X86][AVX512] Added VPERMPD/VPERMQ intrinsics fast-isel generic IR tests

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
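
For reference (not part of this commit), below is a minimal C sketch of the
intrinsics these generic-IR patterns correspond to. The wrapper function names
are illustrative only, the <immintrin.h> signatures are assumed to match the
Intel intrinsics guide, and the immediates mirror the shuffle masks checked in
the tests that follow; build with something like -mavx512f -mavx512vl.

#include <immintrin.h>
#include <stdio.h>

/* imm 0x00 selects indices 0,0,0,0 within each 256-bit lane, i.e. the
 * zmm0[0,0,0,0,4,4,4,4] shuffle checked in test_mm512_permutex_epi64. */
__m512i permutex_epi64_512(__m512i a) {
  return _mm512_permutex_epi64(a, 0x00);
}

/* Merge-masked form, corresponding to the shufflevector + select pattern
 * in test_mm512_mask_permutex_epi64. */
__m512i mask_permutex_epi64_512(__m512i src, __mmask8 k, __m512i a) {
  return _mm512_mask_permutex_epi64(src, k, a, 0x00);
}

/* imm 0x03 selects indices 3,0,0,0, i.e. the ymm0[3,0,0,0] shuffle
 * in test_mm256_permutex_epi64. */
__m256i permutex_epi64_256(__m256i a) {
  return _mm256_permutex_epi64(a, 0x03);
}

/* Zero-masked double variant; imm 0x01 selects indices 1,0,0,0. */
__m256d maskz_permutex_pd_256(__mmask8 k, __m256d a) {
  return _mm256_maskz_permutex_pd(k, a, 0x01);
}

int main(void) {
  __m256i a = _mm256_set_epi64x(3, 2, 1, 0);  /* element i holds value i */
  __m256i r = permutex_epi64_256(a);          /* expect 3, 0, 0, 0 */
  long long out[4];
  _mm256_storeu_si256((__m256i *)out, r);
  printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]);
  return 0;
}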

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=274503&r1=274502&r2=274503&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Mon Jul  4 08:43:10 2016
@@ -264,6 +264,110 @@ define <16 x float> @test_mm512_maskz_pe
   ret <16 x float> %res1
 }
 
+define <8 x i64> @test_mm512_permutex_epi64(<8 x i64> %a0) {
+; X32-LABEL: test_mm512_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %res = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @test_mm512_mask_permutex_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2) {
+; X32-LABEL: test_mm512_mask_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <8 x i64> %a2, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  %res1 = select <8 x i1> %arg1, <8 x i64> %res0, <8 x i64> %a0
+  ret <8 x i64> %res1
+}
+
+define <8 x i64> @test_mm512_maskz_permutex_epi64(i8 %a0, <8 x i64> %a1) {
+; X32-LABEL: test_mm512_maskz_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <8 x i64> %a1, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  %res1 = select <8 x i1> %arg0, <8 x i64> %res0, <8 x i64> zeroinitializer
+  ret <8 x i64> %res1
+}
+
+define <8 x double> @test_mm512_permutex_pd(<8 x double> %a0) {
+; X32-LABEL: test_mm512_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mm512_mask_permutex_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
+; X32-LABEL: test_mm512_mask_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <8 x double> %a2, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  %res1 = select <8 x i1> %arg1, <8 x double> %res0, <8 x double> %a0
+  ret <8 x double> %res1
+}
+
+define <8 x double> @test_mm512_maskz_permutex_pd(i8 %a0, <8 x double> %a1) {
+; X32-LABEL: test_mm512_maskz_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  %res1 = select <8 x i1> %arg0, <8 x double> %res0, <8 x double> zeroinitializer
+  ret <8 x double> %res1
+}
+
 define <8 x i64> @test_mm512_shuffle_epi32(<8 x i64> %a0) {
 ; X32-LABEL: test_mm512_shuffle_epi32:
 ; X32:       # BB#0:

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll?rev=274503&r1=274502&r2=274503&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll Mon Jul  4 08:43:10 2016
@@ -404,4 +404,152 @@ define <8 x float> @test_mm256_maskz_mov
   ret <8 x float> %res1
 }
 
+define <4 x i64> @test_mm256_permutex_epi64(<4 x i64> %a0) {
+; X32-LABEL: test_mm256_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,0,0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,0,0,0]
+; X64-NEXT:    retq
+  %res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64> %a2) {
+; X32-LABEL: test_mm256_mask_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp8:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <4 x i64> %a2, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+  %res1 = select <4 x i1> %arg1, <4 x i64> %res0, <4 x i64> %a0
+  ret <4 x i64> %res1
+}
+
+define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
+; X32-LABEL: test_mm256_maskz_permutex_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp9:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <4 x i64> %a1, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+  %res1 = select <4 x i1> %arg0, <4 x i64> %res0, <4 x i64> zeroinitializer
+  ret <4 x i64> %res1
+}
+
+define <4 x double> @test_mm256_permutex_pd(<4 x double> %a0) {
+; X32-LABEL: test_mm256_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
+; X64-NEXT:    retq
+  %res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x double> %a2) {
+; X32-LABEL: test_mm256_mask_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp10:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <4 x double> %a2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+  %res1 = select <4 x i1> %arg1, <4 x double> %res0, <4 x double> %a0
+  ret <4 x double> %res1
+}
+
+define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_maskz_permutex_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp11:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_permutex_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <4 x double> %a1, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+  %res1 = select <4 x i1> %arg0, <4 x double> %res0, <4 x double> zeroinitializer
+  ret <4 x double> %res1
+}
+
 !0 = !{i32 1}