[llvm] r309315 - Change check prefixes in vector-shuffle-combining-avx.ll to reduce test size.

Dinar Temirbulatov via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jul 27 12:47:35 PDT 2017


Author: dinar
Date: Thu Jul 27 12:47:35 2017
New Revision: 309315

URL: http://llvm.org/viewvc/llvm-project?rev=309315&view=rev
Log:
Change check prefixes in vector-shuffle-combining-avx.ll to reduce test size.

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
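
For context on the mechanism: FileCheck accepts several --check-prefix options per
invocation, and update_llc_test_checks.py can emit a single check block under a shared
prefix whenever every RUN line using that prefix produces identical output. Adding the
common X32/X64 prefixes therefore collapses the duplicated X32-AVX/X32-AVX512 and
X64-AVX/X64-AVX512 blocks, as the diff below shows. A minimal sketch of the pattern
(hypothetical test, not part of this commit):

    ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
    ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX512

    define <4 x float> @identity(<4 x float> %a) {
    ; X32-LABEL: identity:
    ; X32:       # BB#0:
    ; X32-NEXT:    retl
      ret <4 x float> %a
    }

Both runs compile this function to a bare retl, so one X32 block covers them; only
functions whose codegen differs between the feature sets keep prefix-specific checks.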

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll?rev=309315&r1=309314&r2=309315&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll Thu Jul 27 12:47:35 2017
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32-AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64-AVX512
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512
 ;
 ; Combine tests involving AVX target shuffles
 
@@ -27,21 +27,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32:       # BB#0:
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_identity:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_identity:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_identity:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_identity:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_identity:
 ; X64:       # BB#0:
 ; X64-NEXT:    retq
@@ -62,25 +47,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_movddup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_movddup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_movddup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_movddup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_movddup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -103,27 +69,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
@@ -148,25 +93,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_movshdup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_movshdup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_movshdup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_movshdup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_movshdup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -189,25 +115,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_movsldup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_movsldup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_movsldup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_movsldup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_movsldup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
@@ -230,25 +137,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_unpckh:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_unpckh:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_unpckh:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_unpckh:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_unpckh:
 ; X64:       # BB#0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -271,25 +159,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_unpckl:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_unpckl:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_unpckl:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_unpckl:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_unpckl:
 ; X64:       # BB#0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -311,21 +180,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32:       # BB#0:
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_identity:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_identity:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_identity:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_identity:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_identity:
 ; X64:       # BB#0:
 ; X64-NEXT:    retq
@@ -346,25 +200,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_10326u4u:
 ; X64:       # BB#0:
 ; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
@@ -388,25 +223,6 @@ define <8 x float> @combine_vpermilvar_v
 ; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_vperm2f128_8f32:
 ; X64:       # BB#0:
 ; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
@@ -431,25 +247,6 @@ define <8 x float> @combine_vpermilvar_v
 ; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
 ; X64:       # BB#0:
 ; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
@@ -475,29 +272,6 @@ define <4 x double> @combine_vperm2f128_
 ; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X32-AVX512-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X64-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X64-AVX512-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
 ; X64:       # BB#0:
 ; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
@@ -525,25 +299,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_movddup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_movddup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_movddup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_movddup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_movddup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
@@ -566,27 +321,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
@@ -611,25 +345,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_movshdup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_movshdup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_movshdup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_movshdup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_movshdup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
@@ -652,25 +367,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_movsldup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_movsldup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_movsldup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_movsldup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_movsldup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
@@ -692,21 +388,6 @@ define <2 x double> @combine_vpermilvar_
 ; X32:       # BB#0:
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_2f64_identity:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_2f64_identity:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_2f64_identity:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_2f64_identity:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_2f64_identity:
 ; X64:       # BB#0:
 ; X64-NEXT:    retq
@@ -727,25 +408,6 @@ define <2 x double> @combine_vpermilvar_
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_2f64_movddup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_2f64_movddup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_2f64_movddup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_2f64_movddup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_2f64_movddup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -767,21 +429,6 @@ define <4 x double> @combine_vpermilvar_
 ; X32:       # BB#0:
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f64_identity:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f64_identity:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f64_identity:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f64_identity:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f64_identity:
 ; X64:       # BB#0:
 ; X64-NEXT:    retq
@@ -802,25 +449,6 @@ define <4 x double> @combine_vpermilvar_
 ; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f64_movddup:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f64_movddup:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f64_movddup:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f64_movddup:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f64_movddup:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
@@ -843,25 +471,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_4stage:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_4stage:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_4stage:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_4stage:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_4stage:
 ; X64:       # BB#0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
@@ -887,25 +496,6 @@ define <8 x float> @combine_vpermilvar_8
 ; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_8f32_4stage:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_8f32_4stage:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_8f32_4stage:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_8f32_4stage:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_8f32_4stage:
 ; X64:       # BB#0:
 ; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
@@ -931,25 +521,6 @@ define <4 x float> @combine_vpermilvar_4
 ; X32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: combine_vpermilvar_4f32_as_insertps:
 ; X64:       # BB#0:
 ; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
@@ -973,25 +544,6 @@ define <2 x double> @constant_fold_vperm
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: constant_fold_vpermilvar_pd:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: constant_fold_vpermilvar_pd:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: constant_fold_vpermilvar_pd:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: constant_fold_vpermilvar_pd:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: constant_fold_vpermilvar_pd:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
@@ -1014,25 +566,6 @@ define <4 x double> @constant_fold_vperm
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: constant_fold_vpermilvar_pd_256:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: constant_fold_vpermilvar_pd_256:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: constant_fold_vpermilvar_pd_256:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: constant_fold_vpermilvar_pd_256:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: constant_fold_vpermilvar_pd_256:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
@@ -1055,25 +588,6 @@ define <4 x float> @constant_fold_vpermi
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: constant_fold_vpermilvar_ps:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: constant_fold_vpermilvar_ps:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: constant_fold_vpermilvar_ps:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: constant_fold_vpermilvar_ps:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: constant_fold_vpermilvar_ps:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
@@ -1096,25 +610,6 @@ define <8 x float> @constant_fold_vpermi
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
 ; X32-NEXT:    retl
 ;
-; X32-AVX-LABEL: constant_fold_vpermilvar_ps_256:
-; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
-; X32-AVX-NEXT:    retl
-;
-; X32-AVX512-LABEL: constant_fold_vpermilvar_ps_256:
-; X32-AVX512:       # BB#0:
-; X32-AVX512-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX-LABEL: constant_fold_vpermilvar_ps_256:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
-; X64-AVX-NEXT:    retq
-;
-; X64-AVX512-LABEL: constant_fold_vpermilvar_ps_256:
-; X64-AVX512:       # BB#0:
-; X64-AVX512-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
-; X64-AVX512-NEXT:    retq
 ; X64-LABEL: constant_fold_vpermilvar_ps_256:
 ; X64:       # BB#0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]