[llvm] r295731 - [X86][AVX2] Add AVX512 test targets to AVX2 shuffle combines.

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Tue Feb 21 08:29:28 PST 2017


Author: rksimon
Date: Tue Feb 21 10:29:28 2017
New Revision: 295731

URL: http://llvm.org/viewvc/llvm-project?rev=295731&view=rev
Log:
[X86][AVX2] Add AVX512 test targets to AVX2 shuffle combines.

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll?rev=295731&r1=295730&r2=295731&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll Tue Feb 21 10:29:28 2017
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512
 
 declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
 declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
@@ -514,17 +516,29 @@ define <4 x i64> @combine_pshufb_as_zext
 }
 
 define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
-; X32-LABEL: combine_pshufb_as_vzmovl_64:
-; X32:       # BB#0:
-; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_as_vzmovl_64:
-; X64:       # BB#0:
-; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; X64-NEXT:    retq
+; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
+; X32-AVX2:       # BB#0:
+; X32-AVX2-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X32-AVX2-NEXT:    retl
+;
+; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
+; X32-AVX512:       # BB#0:
+; X32-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; X32-AVX512-NEXT:    retl
+;
+; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
+; X64-AVX512:       # BB#0:
+; X64-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; X64-AVX512-NEXT:    retq
   %1 = bitcast <4 x double> %a0 to <32 x i8>
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   %3 = bitcast <32 x i8> %2 to <4 x double>
@@ -532,17 +546,29 @@ define <4 x double> @combine_pshufb_as_v
 }
 
 define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
-; X32-LABEL: combine_pshufb_as_vzmovl_32:
-; X32:       # BB#0:
-; X32-NEXT:    vxorps %ymm1, %ymm1, %ymm1
-; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_as_vzmovl_32:
-; X64:       # BB#0:
-; X64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
-; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; X64-NEXT:    retq
+; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; X32-AVX2:       # BB#0:
+; X32-AVX2-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X32-AVX2-NEXT:    retl
+;
+; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
+; X32-AVX512:       # BB#0:
+; X32-AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X32-AVX512-NEXT:    retl
+;
+; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
+; X64-AVX512:       # BB#0:
+; X64-AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X64-AVX512-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <32 x i8>
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   %3 = bitcast <32 x i8> %2 to <8 x float>
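
For context, the new -AVX512 check prefixes are needed because AVX512F changes how these combines lower: under AVX2 the zeroing shuffle is matched as a 256-bit blend against a zeroed ymm register (vxorpd/vblendpd, vxorps/vblendps), while with AVX512F it becomes a scalar merge on xmm registers (vxorpd/vmovsd, vxorps/vmovss). The two sequences are equivalent because a VEX/EVEX write to an xmm register implicitly zeroes the upper bits of the containing ymm. The difference can be reproduced standalone with a minimal sketch like the one below; the file name is illustrative, the llc invocations mirror the RUN lines added above, and the final ret is reconstructed from the function signature since the diff context elides it:

    ; vzmovl-repro.ll (file name illustrative); compare:
    ;   llc < vzmovl-repro.ll -mtriple=x86_64-unknown -mattr=+avx2
    ;   llc < vzmovl-repro.ll -mtriple=x86_64-unknown -mattr=+avx512f
    declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)

    define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
      ; Keep bytes 0-7 (the low double); a mask byte of -1 has its high bit
      ; set, so pshufb zeroes that destination byte. The result is element 0
      ; of %a0 with all remaining bits cleared.
      %1 = bitcast <4 x double> %a0 to <32 x i8>
      %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
      %3 = bitcast <32 x i8> %2 to <4 x double>
      ret <4 x double> %3
    }

The first invocation should emit the vxorpd/vblendpd ymm sequence checked under X64-AVX2 above, and the second the vxorpd/vmovsd xmm sequence checked under X64-AVX512; the assertions themselves can be regenerated with utils/update_llc_test_checks.py, as noted at the top of the test file.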



