[llvm] r281833 - [X86][AVX] Test target shuffle combining on 32 and 64-bit targets

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 17 11:42:42 PDT 2016


Author: rksimon
Date: Sat Sep 17 13:42:41 2016
New Revision: 281833

URL: http://llvm.org/viewvc/llvm-project?rev=281833&view=rev
Log:
[X86][AVX] Test target shuffle combining on 32 and 64-bit targets
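
The tests are now run under both 32-bit (i686) and 64-bit (x86_64) triples, with the assertions split into separate X32 and X64 check prefixes. This exposes codegen differences on 32-bit targets, e.g. constant-pool operands addressed via .LCPI labels rather than RIP-relative loads, pointer arguments reloaded from the stack, and <2 x i64> shuffle selector constants that must be materialized in registers. Per the NOTE line in each file, the CHECK lines are autogenerated; a minimal sketch of regenerating them with the in-tree script, assuming llc is passed explicitly (the exact --llc option spelling is an assumption and has varied across revisions):

  # Regenerate the autogenerated CHECK lines for the modified tests
  # (run from the llvm source tree; --llc spelling may differ by revision).
  python utils/update_llc_test_checks.py --llc=build/bin/llc \
    test/CodeGen/X86/vector-shuffle-combining-avx.ll \
    test/CodeGen/X86/vector-shuffle-combining-avx2.ll \
    test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll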

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll?rev=281833&r1=281832&r2=281833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll Sat Sep 17 13:42:41 2016
@@ -1,7 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64
 ;
 ; Combine tests involving AVX target shuffles
 
@@ -20,92 +23,141 @@ declare <8 x float> @llvm.x86.avx.vperm2
 declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
 
 define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_identity:
-; ALL:       # BB#0:
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   ret <4 x float> %2
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movddup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_movddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
   ret <4 x float> %1
 }
 define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movddup_load:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_movddup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT:    retq
   %1 = load <4 x float>, <4 x float> *%a0
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
   ret <4 x float> %2
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movshdup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_movshdup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movshdup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movsldup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_movsldup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movsldup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_unpckh:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_unpckh:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_unpckh:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_unpckl:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_unpckl:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_unpckl:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_identity:
-; ALL:       # BB#0:
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_10326u4u:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_10326u4u:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_10326u4u:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 undef>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; ALL:       # BB#0:
-; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; X32:       # BB#0:
+; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; X64:       # BB#0:
+; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -113,10 +165,15 @@ define <8 x float> @combine_vpermilvar_v
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; ALL:       # BB#0:
-; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
+; X32:       # BB#0:
+; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
+; X64:       # BB#0:
+; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -124,11 +181,17 @@ define <8 x float> @combine_vpermilvar_v
 }
 
 define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
-; ALL-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; ALL:       # BB#0:
-; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; X32:       # BB#0:
+; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; X64:       # BB#0:
+; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; X64-NEXT:    retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
@@ -136,82 +199,130 @@ define <4 x double> @combine_vperm2f128_
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movddup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_movddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
   ret <8 x float> %1
 }
 define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movddup_load:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_movddup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
+; X64-NEXT:    retq
   %1 = load <8 x float>, <8 x float> *%a0
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movshdup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_movshdup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movshdup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
   ret <8 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movsldup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_movsldup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movsldup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
   ret <8 x float> %1
 }
 
 define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64_identity:
-; ALL:       # BB#0:
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_2f64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    movl $2, %eax
+; X32-NEXT:    vmovd %eax, %xmm1
+; X32-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
+; X32-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_2f64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
   %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>  %1, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %2
 }
 
 define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64_movddup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_2f64_movddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_2f64_movddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT:    retq
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64_identity:
-; ALL:       # BB#0:
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>  %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   ret <4 x double> %2
 }
 
 define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64_movddup:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f64_movddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f64_movddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X64-NEXT:    retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
   ret <4 x double> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_4stage:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_4stage:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_4stage:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; X64-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
@@ -220,10 +331,15 @@ define <4 x float> @combine_vpermilvar_4
 }
 
 define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_4stage:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_8f32_4stage:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_4stage:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; X64-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
@@ -232,48 +348,77 @@ define <8 x float> @combine_vpermilvar_8
 }
 
 define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_as_insertps:
-; ALL:       # BB#0:
-; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; ALL-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_4f32_as_insertps:
+; X32:       # BB#0:
+; X32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_as_insertps:
+; X64:       # BB#0:
+; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
+; X64-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
   ret <4 x float> %2
 }
 
 define <2 x double> @constant_fold_vpermilvar_pd() {
-; ALL-LABEL: constant_fold_vpermilvar_pd:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilpd {{.*#+}} xmm0 = mem[1,0]
-; ALL-NEXT:    retq
+; X32-LABEL: constant_fold_vpermilvar_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movl $2, %eax
+; X32-NEXT:    vmovd %eax, %xmm0
+; X32-NEXT:    vmovapd {{.*#+}} xmm1 = [1.000000e+00,2.000000e+00]
+; X32-NEXT:    vpermilpd %xmm0, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_vpermilvar_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilpd {{.*#+}} xmm0 = mem[1,0]
+; X64-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @constant_fold_vpermilvar_pd_256() {
-; ALL-LABEL: constant_fold_vpermilvar_pd_256:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
-; ALL-NEXT:    retq
+; X32-LABEL: constant_fold_vpermilvar_pd_256:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_vpermilvar_pd_256:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
+; X64-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
   ret <4 x double> %1
 }
 
 define <4 x float> @constant_fold_vpermilvar_ps() {
-; ALL-LABEL: constant_fold_vpermilvar_ps:
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
-; ALL-NEXT:    retq
+; X32-LABEL: constant_fold_vpermilvar_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_vpermilvar_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
+; X64-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @constant_fold_vpermilvar_ps_256() {
-; ALL-LABEL: constant_fold_vpermilvar_ps_256:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
-; ALL-NEXT:    retq
+; X32-LABEL: constant_fold_vpermilvar_ps_256:
+; X32:       # BB#0:
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
+; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_vpermilvar_ps_256:
+; X64:       # BB#0:
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
+; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
+; X64-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
   ret <8 x float> %1
 }

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll?rev=281833&r1=281832&r2=281833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll Sat Sep 17 13:42:41 2016
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
 declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
@@ -7,30 +8,45 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.
 declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)
 
 define <32 x i8> @combine_pshufb_pslldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_pslldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_pslldq:
+; X32:       # BB#0:
+; X32-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_pslldq:
+; X64:       # BB#0:
+; X64-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
   ret <32 x i8> %2
 }
 
 define <32 x i8> @combine_pshufb_psrldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_psrldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_psrldq:
+; X32:       # BB#0:
+; X32-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_psrldq:
+; X64:       # BB#0:
+; X64-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
   ret <32 x i8> %2
 }
 
 define <32 x i8> @combine_pshufb_vpermd(<8 x i32> %a) {
-; CHECK-LABEL: combine_pshufb_vpermd:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_vpermd:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_vpermd:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X64-NEXT:    retq
   %tmp0 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
   %tmp1 = bitcast <8 x i32> %tmp0 to <32 x i8>
   %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 30>
@@ -38,10 +54,15 @@ define <32 x i8> @combine_pshufb_vpermd(
 }
 
 define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
-; CHECK-LABEL: combine_pshufb_vpermps:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_vpermps:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_vpermps:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X64-NEXT:    retq
   %tmp0 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
   %tmp1 = bitcast <8 x float> %tmp0 to <32 x i8>
   %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 30>
@@ -49,11 +70,17 @@ define <32 x i8> @combine_pshufb_vpermps
 }
 
 define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
-; CHECK-LABEL: combine_permq_pshufb_as_vperm2i128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
-; CHECK-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permq_pshufb_as_vperm2i128:
+; X32:       # BB#0:
+; X32-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
+; X32-NEXT:    vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permq_pshufb_as_vperm2i128:
+; X64:       # BB#0:
+; X64-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
+; X64-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>)
@@ -63,11 +90,17 @@ define <4 x i64> @combine_permq_pshufb_a
 }
 
 define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
-; CHECK-LABEL: combine_permq_pshufb_as_vpblendd:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permq_pshufb_as_vpblendd:
+; X32:       # BB#0:
+; X32-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permq_pshufb_as_vpblendd:
+; X64:       # BB#0:
+; X64-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X64-NEXT:    retq
   %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>)
@@ -75,20 +108,31 @@ define <32 x i8> @combine_permq_pshufb_a
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastb128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastb %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastb128:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastb %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastb128:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastb %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> zeroinitializer)
   ret <16 x i8> %1
 }
 
 define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastb256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vpbroadcastb %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vpbroadcastb %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vpbroadcastb %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> zeroinitializer)
@@ -99,20 +143,31 @@ define <32 x i8> @combine_pshufb_as_vpbr
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastw128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastw128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastw %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastw128:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastw %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastw128:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastw %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
   ret <16 x i8> %1
 }
 
 define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastw256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vpbroadcastw %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vpbroadcastw %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vpbroadcastw %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
@@ -123,23 +178,36 @@ define <32 x i8> @combine_pshufb_as_vpbr
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastd128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastd %xmm0, %xmm0
-; CHECK-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastd128:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastd %xmm0, %xmm0
+; X32-NEXT:    vpaddb {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastd128:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastd %xmm0, %xmm0
+; X64-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
   %2 = add <16 x i8> %1, <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>
   ret <16 x i8> %2
 }
 
 define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastd256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vpbroadcastd %xmm0, %ymm0
-; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permd_as_vpbroadcastd256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vpbroadcastd %xmm0, %ymm0
+; X32-NEXT:    vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastd256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vpbroadcastd %xmm0, %ymm0
+; X64-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %1, <8 x i32> zeroinitializer)
   %3 = add <8 x i32> %2, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -147,21 +215,33 @@ define <8 x i32> @combine_permd_as_vpbro
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastq128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastq %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastq128:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastq %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastq128:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastq %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
   ret <16 x i8> %1
 }
 
 define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastq256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vpbroadcastq %xmm0, %ymm0
-; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permd_as_vpbroadcastq256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vpbroadcastq %xmm0, %ymm0
+; X32-NEXT:    vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastq256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vpbroadcastq %xmm0, %ymm0
+; X64-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
   %3 = add <8 x i32> %2, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -169,10 +249,15 @@ define <8 x i32> @combine_permd_as_vpbro
 }
 
 define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastss128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastss128:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastss128:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <4 x float> %a to <16 x i8>
   %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
   %3 = bitcast <16 x i8> %2 to <4 x float>
@@ -180,22 +265,34 @@ define <4 x float> @combine_pshufb_as_vp
 }
 
 define <8 x float> @combine_permd_as_vpbroadcastss256(<4 x float> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastss256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permd_as_vpbroadcastss256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vbroadcastss %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastss256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vbroadcastss %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %1, <8 x i32> zeroinitializer)
   ret <8 x float> %2
 }
 
 define <4 x double> @combine_permd_as_vpbroadcastsd256(<2 x double> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastsd256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permd_as_vpbroadcastsd256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastsd256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x double> %1 to <8 x float>
   %3 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
@@ -204,30 +301,45 @@ define <4 x double> @combine_permd_as_vp
 }
 
 define <16 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastb %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastb %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastb %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
   %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> zeroinitializer)
   ret <16 x i8> %2
 }
 
 define <32 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb256(<32 x i8> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastb %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastb %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastb %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> zeroinitializer)
   ret <32 x i8> %2
 }
 
 define <4 x float> @combine_vpbroadcast_pshufb_as_vpbroadcastss128(<4 x float> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %xmm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
   %2 = bitcast <4 x float> %1 to <16 x i8>
   %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
@@ -236,22 +348,34 @@ define <4 x float> @combine_vpbroadcast_
 }
 
 define <8 x float> @combine_vpbroadcast_permd_as_vpbroadcastss256(<4 x float> %a) {
-; CHECK-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %ymm0
+; X32-NEXT:    vbroadcastss %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %ymm0
+; X64-NEXT:    vbroadcastss %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> zeroinitializer
   %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %1, <8 x i32> zeroinitializer)
   ret <8 x float> %2
 }
 
 define <4 x double> @combine_vpbroadcast_permd_as_vpbroadcastsd256(<2 x double> %a) {
-; CHECK-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X64-NEXT:    retq
   %1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> zeroinitializer
   %2 = bitcast <4 x double> %1 to <8 x float>
   %3 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
@@ -260,29 +384,45 @@ define <4 x double> @combine_vpbroadcast
 }
 
 define <8 x i32> @combine_permd_as_permq(<8 x i32> %a) {
-; CHECK-LABEL: combine_permd_as_permq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,1]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permd_as_permq:
+; X32:       # BB#0:
+; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permd_as_permq:
+; X64:       # BB#0:
+; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,1]
+; X64-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 2, i32 3>)
   ret <8 x i32> %1
 }
 
 define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
-; CHECK-LABEL: combine_permps_as_permpd:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permps_as_permpd:
+; X32:       # BB#0:
+; X32-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permps_as_permpd:
+; X64:       # BB#0:
+; X64-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
+; X64-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>)
   ret <8 x float> %1
 }
 
 define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
-; CHECK-LABEL: combine_pshufb_as_vzmovl_64:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vzmovl_64:
+; X32:       # BB#0:
+; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vzmovl_64:
+; X64:       # BB#0:
+; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X64-NEXT:    retq
   %1 = bitcast <4 x double> %a0 to <32 x i8>
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   %3 = bitcast <32 x i8> %2 to <4 x double>
@@ -290,11 +430,17 @@ define <4 x double> @combine_pshufb_as_v
 }
 
 define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
-; CHECK-LABEL: combine_pshufb_as_vzmovl_32:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_vzmovl_32:
+; X32:       # BB#0:
+; X32-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_vzmovl_32:
+; X64:       # BB#0:
+; X64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <32 x i8>
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   %3 = bitcast <32 x i8> %2 to <8 x float>
@@ -302,77 +448,120 @@ define <8 x float> @combine_pshufb_as_vz
 }
 
 define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pslldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pslldq:
+; X32:       # BB#0:
+; X32-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pslldq:
+; X64:       # BB#0:
+; X64-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
+; X64-NEXT:    retq
   %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
   ret <32 x i8> %res0
 }
 
 define <32 x i8> @combine_pshufb_as_psrldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_psrldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_psrldq:
+; X32:       # BB#0:
+; X32-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_psrldq:
+; X64:       # BB#0:
+; X64-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT:    retq
   %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
   ret <32 x i8> %res0
 }
 
 define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pshuflw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pshuflw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pshuflw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
+; X64-NEXT:    retq
   %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
   ret <32 x i8> %res0
 }
 
 define <32 x i8> @combine_pshufb_as_pshufhw(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pshufhw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pshufhw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pshufhw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
+; X64-NEXT:    retq
   %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
   ret <32 x i8> %res0
 }
 
 define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_not_as_pshufw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_not_as_pshufw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_not_as_pshufw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
+; X64-NEXT:    retq
   %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
   %res1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %res0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
   ret <32 x i8> %res1
 }
 
 define <8 x i32> @constant_fold_permd() {
-; CHECK-LABEL: constant_fold_permd:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
-; CHECK-NEXT:    vpermd {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: constant_fold_permd:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
+; X32-NEXT:    vpermd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_permd:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
+; X64-NEXT:    vpermd {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
   ret <8 x i32> %1
 }
 
 define <8 x float> @constant_fold_permps() {
-; CHECK-LABEL: constant_fold_permps:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
-; CHECK-NEXT:    vpermps {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X32-LABEL: constant_fold_permps:
+; X32:       # BB#0:
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
+; X32-NEXT:    vpermps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_permps:
+; X64:       # BB#0:
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
+; X64-NEXT:    vpermps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
   ret <8 x float> %1
 }
 
 define <32 x i8> @constant_fold_pshufb_256() {
-; CHECK-LABEL: constant_fold_pshufb_256:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
-; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[15],zero,zero,zero,zero,zero,ymm0[7,6,17],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[31],zero,zero,zero,zero,zero,ymm0[23,22]
-; CHECK-NEXT:    retq
+; X32-LABEL: constant_fold_pshufb_256:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[15],zero,zero,zero,zero,zero,ymm0[7,6,17],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[31],zero,zero,zero,zero,zero,ymm0[23,22]
+; X32-NEXT:    retl
+;
+; X64-LABEL: constant_fold_pshufb_256:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[15],zero,zero,zero,zero,zero,ymm0[7,6,17],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[31],zero,zero,zero,zero,zero,ymm0[23,22]
+; X64-NEXT:    retq
   %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15>, <32 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6, i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
   ret <32 x i8> %1
 }

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll?rev=281833&r1=281832&r2=281833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll Sat Sep 17 13:42:41 2016
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=X64
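
As the NOTE says, the assertions are autogenerated, so after adding the i686 RUN lines the expected output is regenerated rather than hand-edited. A rough sketch of the workflow (flag spelling may differ between script revisions):

    cd llvm
    python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll

The script reruns every RUN line and rewrites the prefixed CHECK blocks in place, producing the X32/X64 pairs seen below.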
 
 declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
 
@@ -18,338 +19,575 @@ declare <16 x i32> @llvm.x86.avx512.mask
 declare <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
 
 define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
-; CHECK-LABEL: combine_permvar_8f64_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8f64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X32-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8f64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x1, i8 -1)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, i8 -1)
   ret <8 x double> %res1
 }
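
The X32/X64 asymmetry here (and in most of the i64-element tests that follow) appears to come from type legalization: i686 has no 64-bit integer type, so the <8 x i64> index constants are handled as <16 x i32> and print as interleaved low/high dword pairs. A little-endian sketch of what the X32 run materializes:

    <8 x i64> <i64 7, i64 6, i64 5, ...>
      == (bitcast) ==>
    <16 x i32> <i32 7, i32 0, i32 6, i32 0, i32 5, i32 0, ...>

With the indices no longer a simple <8 x i64> build vector, the identity combine doesn't fire and both vpermpd ops survive on X32, while X64 folds the whole function to a bare ret.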
 define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; CHECK-LABEL: combine_permvar_8f64_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
-; CHECK-NEXT:    vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; CHECK-NEXT:    vpermpd %zmm1, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovapd %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8f64_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT:    vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermpd %zmm1, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovapd %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8f64_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
+; X64-NEXT:    vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
+; X64-NEXT:    vpermpd %zmm1, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovapd %zmm1, %zmm0
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x1, i8 %m)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, i8 %m)
   ret <8 x double> %res1
 }
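
The mask handling also diverges: x86_64 receives the i8 %m in %edi and needs only a single kmovw, while i686 pulls it from the stack with movzbl and then kmovd, since AVX-512 has no byte-sized k-register move here (KMOVB is an AVX512DQ instruction, and these runs only enable avx512bw). The wider i16/i32 masks in later tests load straight from the stack with kmovw/kmovd.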
 
 define <8 x i64> @combine_permvar_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
-; CHECK-LABEL: combine_permvar_8i64_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8i64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8i64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x1, i8 -1)
   %res1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, i8 -1)
   ret <8 x i64> %res1
 }
 define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; CHECK-LABEL: combine_permvar_8i64_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; CHECK-NEXT:    vpermq %zmm1, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8i64_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermq %zmm1, %zmm0, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8i64_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
+; X64-NEXT:    vpermq %zmm1, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
   %res0 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x1, i8 %m)
   %res1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, i8 %m)
   ret <8 x i64> %res1
 }
 
 define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
-; CHECK-LABEL: combine_vpermt2var_8f64_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8f64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8f64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x0, <8 x double> %x1, i8 -1)
   %res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 -1)
   ret <8 x double> %res1
 }
 define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; CHECK-LABEL: combine_vpermt2var_8f64_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,14,5,12,3,10,1,8]
-; CHECK-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8f64_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,14,5,12,3,10,1,8]
+; X64-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x0, <8 x double> %x1, i8 %m)
   %res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 %m)
   ret <8 x double> %res1
 }
 
 define <8 x double> @combine_vpermt2var_8f64_movddup(<8 x double> %x0, <8 x double> %x1) {
-; CHECK-LABEL: combine_vpermt2var_8f64_movddup:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8f64_movddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,0,0,0,2,0,2,0,4,0,4,0,u,u,u,u>
+; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8f64_movddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 undef, i64 undef>, <8 x double> %x0, <8 x double> %x1, i8 -1)
   ret <8 x double> %res0
 }
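
Here the X64 run recognizes the vpermt2var as vmovddup, but the X32 run again loses the pattern once the <8 x i64> indices are split: each i64 index becomes a dword pair and each undef index becomes a u,u pair, giving the <0,0,0,0,2,0,2,0,4,0,4,0,u,u,u,u> control above, which no longer looks like a movddup mask to the combiner.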
 define <8 x double> @combine_vpermt2var_8f64_movddup_load(<8 x double> *%p0, <8 x double> %x1) {
-; CHECK-LABEL: combine_vpermt2var_8f64_movddup_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8f64_movddup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovdqa64 (%eax), %zmm1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,0,0,2,0,2,0,4,0,4,0,6,0,6,0]
+; X32-NEXT:    vpermt2pd %zmm0, %zmm2, %zmm1
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8f64_movddup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
   %x0 = load <8 x double>, <8 x double> *%p0
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 6, i64 6>, <8 x double> %x0, <8 x double> %x1, i8 -1)
   ret <8 x double> %res0
 }
 define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; CHECK-LABEL: combine_vpermt2var_8f64_movddup_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8f64_movddup_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,0,0,2,0,2,0,4,0,4,0,6,0,6,0]
+; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8f64_movddup_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 6, i64 6>, <8 x double> %x0, <8 x double> %x1, i8 %m)
   ret <8 x double> %res0
 }
 
 define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
-; CHECK-LABEL: combine_vpermt2var_8i64_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8i64_identity:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,u,6,0,5,0,4,0,3,0,2,0,1,0,0,0>
+; X32-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,u,14,0,5,0,12,0,3,0,10,0,1,0,8,0>
+; X32-NEXT:    vpermt2q %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8i64_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x0, <8 x i64> %x1, i8 -1)
   %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, <8 x i64> %res0, i8 -1)
   ret <8 x i64> %res1
 }
 define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; CHECK-LABEL: combine_vpermt2var_8i64_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,14,5,12,3,10,1,8]
-; CHECK-NEXT:    vpermt2q %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_8i64_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X32-NEXT:    vpermt2q %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,14,5,12,3,10,1,8]
+; X64-NEXT:    vpermt2q %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x0, <8 x i64> %x1, i8 %m)
   %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, <8 x i64> %res0, i8 %m)
   ret <8 x i64> %res1
 }
 
 define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, <16 x float> %res0, i16 -1)
   ret <16 x float> %res1
 }
 define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; CHECK-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X32-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X64-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   %res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, <16 x float> %res0, i16 %m)
   ret <16 x float> %res1
 }
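
An i16 mask needs no zero-extension dance on i686: KMOVW has a 16-bit memory form, so the X32 block loads %k1 directly from the argument slot.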
 
 define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovddup:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; CHECK-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovddup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovddup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovddup_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm1
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; CHECK-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovddup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovdqa64 (%eax), %zmm1
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovddup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa64 (%rdi), %zmm1
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; CHECK-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm1
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; CHECK-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovdqa64 (%eax), %zmm1
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovdqa64 (%rdi), %zmm1
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
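
The *_load variants show the pointer-argument difference as well: x86_64 folds the load straight through %rdi, while i686 first pulls the pointer from the stack (movl {{[0-9]+}}(%esp), %eax) and addresses through %eax.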
 
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup(<16 x float> %x0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovshdup:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovshdup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovshdup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup_load(<16 x float> *%p0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup(<16 x float> %x0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovsldup:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovsldup:
+; X32:       # BB#0:
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovsldup:
+; X64:       # BB#0:
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_load(<16 x float> *%p0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
+; X64:       # BB#0:
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vpermilps(<16 x float> %x0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vpermilps:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vpermilps:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vpermilps:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_load(<16 x float> *%p0, <16 x float> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vpermilps_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vpermilps_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vpermilps_load:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT:    retq
   %x0 = load <16 x float>, <16 x float> *%p0
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 %m)
   ret <16 x float> %res0
 }
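
Every <16 x float> test in this stretch (vmovddup, vmovshdup, vmovsldup, vpermilps, with and without masking) produces identical vector code on both targets: 32-bit shuffle indices survive i686 legalization intact, so only the scalar mask and pointer calling-convention details differ between the X32 and X64 blocks.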
 
 define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32> %x1) {
-; CHECK-LABEL: combine_vpermt2var_16i32_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16i32_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16i32_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>, <16 x i32> %x0, <16 x i32> %x1, i16 -1)
   %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 undef, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, <16 x i32> %res0, i16 -1)
   ret <16 x i32> %res1
 }
 define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x i32> %x1, i16 %m) {
-; CHECK-LABEL: combine_vpermt2var_16i32_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; CHECK-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_16i32_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X32-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    vmovdqa32 {{.*#+}} zmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X64-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> %x0, <16 x i32> %x1, i16 %m)
   %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, <16 x i32> %res0, i16 %m)
   ret <16 x i32> %res1
 }
 
 define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16> %x1) {
-; CHECK-LABEL: combine_vpermt2var_32i16_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_32i16_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_32i16_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x0, <32 x i16> %x1, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, <32 x i16> %res0, i32 -1)
   ret <32 x i16> %res1
 }
 define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x i16> %x1, i32 %m) {
-; CHECK-LABEL: combine_vpermt2var_32i16_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vmovdqu16 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpermt2w %zmm1, %zmm2, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqu16 {{.*#+}} zmm1 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; CHECK-NEXT:    vpermt2w %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermt2var_32i16_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    vmovdqu16 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT:    vpermt2w %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT:    vmovdqu16 {{.*#+}} zmm1 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
+; X32-NEXT:    vpermt2w %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermt2var_32i16_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vmovdqu16 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpermt2w %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT:    vmovdqu16 {{.*#+}} zmm1 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
+; X64-NEXT:    vpermt2w %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
   %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x0, <32 x i16> %x1, i32 %m)
   %res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, <32 x i16> %res0, i32 %m)
   ret <32 x i16> %res1
 }
 
 define <64 x i8> @combine_pshufb_identity(<64 x i8> %x0) {
-; CHECK-LABEL: combine_pshufb_identity:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_identity:
+; X32:       # BB#0:
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_identity:
+; X64:       # BB#0:
+; X64-NEXT:    retq
   %select = bitcast <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> to <64 x i8>
   %mask = bitcast <16 x i32> <i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 undef, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051> to <64 x i8>
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %x0, <64 x i8> %mask, <64 x i8> %select, i64 -1)
@@ -357,16 +595,29 @@ define <64 x i8> @combine_pshufb_identit
   ret <64 x i8> %res1
 }
 define <64 x i8> @combine_pshufb_identity_mask(<64 x i8> %x0, i64 %m) {
-; CHECK-LABEL: combine_pshufb_identity_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovq %rdi, %k1
-; CHECK-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
-; CHECK-NEXT:    vmovdqu8 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; CHECK-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3
-; CHECK-NEXT:    vpshufb %zmm2, %zmm0, %zmm3 {%k1}
-; CHECK-NEXT:    vpshufb %zmm2, %zmm3, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_identity_mask:
+; X32:       # BB#0:
+; X32-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X32-NEXT:    vmovdqu8 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k0, %k1, %k1
+; X32-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3
+; X32-NEXT:    vpshufb %zmm2, %zmm0, %zmm3 {%k1}
+; X32-NEXT:    vpshufb %zmm2, %zmm3, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_identity_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vmovdqu8 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3
+; X64-NEXT:    vpshufb %zmm2, %zmm0, %zmm3 {%k1}
+; X64-NEXT:    vpshufb %zmm2, %zmm3, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
   %select = bitcast <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> to <64 x i8>
   %mask = bitcast <16 x i32> <i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051> to <64 x i8>
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %x0, <64 x i8> %mask, <64 x i8> %select, i64 %m)
@@ -375,140 +626,233 @@ define <64 x i8> @combine_pshufb_identit
 }
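
A 64-bit mask is the awkward case on i686: there is no kmovq from a 32-bit GPR, so the mask is assembled from its two dword halves. The sequence the X32 block uses, schematically (stack offsets elided as in the CHECK lines):

    kmovd {{[0-9]+}}(%esp), %k0   ; one 32-bit half of %m
    kmovd {{[0-9]+}}(%esp), %k1   ; the other half
    kunpckdq %k0, %k1, %k1        ; concatenate into a 64-bit k-register

x86_64 instead receives %m in %rdi and uses a single kmovq.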
 
 define <32 x i16> @combine_permvar_as_vpbroadcastw512(<32 x i16> %x0) {
-; CHECK-LABEL: combine_permvar_as_vpbroadcastw512:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastw %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_as_vpbroadcastw512:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastw %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_as_vpbroadcastw512:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastw %xmm0, %zmm0
+; X64-NEXT:    retq
   %1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> zeroinitializer, <32 x i16> undef, i32 -1)
   ret <32 x i16> %1
 }
 
 define <16 x i32> @combine_permvar_as_vpbroadcastd512(<16 x i32> %x0) {
-; CHECK-LABEL: combine_permvar_as_vpbroadcastd512:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_as_vpbroadcastd512:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastd %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_as_vpbroadcastd512:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastd %xmm0, %zmm0
+; X64-NEXT:    retq
   %1 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %x0, <16 x i32> zeroinitializer, <16 x i32> undef, i16 -1)
   ret <16 x i32> %1
 }
 
 define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
-; CHECK-LABEL: combine_permvar_as_vpbroadcastq512:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_as_vpbroadcastq512:
+; X32:       # BB#0:
+; X32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X32-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_as_vpbroadcastq512:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastq %xmm0, %zmm0
+; X64-NEXT:    retq
   %1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1)
   ret <8 x i64> %1
 }
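
This is the one broadcast the X32 run fails to form: with the <8 x i64> zero-index vector split during legalization, the combine to vpbroadcastq doesn't trigger, so an all-zero index register is materialized (vpxord) and the variable vpermq is kept. The w/d broadcasts above are unaffected, since their index elements are at most 32 bits wide.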
 
 define <8 x i64> @combine_permvar_8i64_as_permq(<8 x i64> %x0, <8 x i64> %x1) {
-; CHECK-LABEL: combine_permvar_8i64_as_permq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8i64_as_permq:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
+; X32-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8i64_as_permq:
+; X64:       # BB#0:
+; X64-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
+; X64-NEXT:    retq
   %1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x i64> %x1, i8 -1)
   ret <8 x i64> %1
 }
 define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; CHECK-LABEL: combine_permvar_8i64_as_permq_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8i64_as_permq_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
+; X32-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8i64_as_permq_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
   %1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x i64> %x1, i8 %m)
   ret <8 x i64> %1
 }
 
 define <8 x double> @combine_permvar_8f64_as_permpd(<8 x double> %x0, <8 x double> %x1) {
-; CHECK-LABEL: combine_permvar_8f64_as_permpd:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8f64_as_permpd:
+; X32:       # BB#0:
+; X32-NEXT:    vmovapd {{.*#+}} zmm1 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
+; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8f64_as_permpd:
+; X64:       # BB#0:
+; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
+; X64-NEXT:    retq
   %1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x double> %x1, i8 -1)
   ret <8 x double> %1
 }
 define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; CHECK-LABEL: combine_permvar_8f64_as_permpd_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
-; CHECK-NEXT:    vmovapd %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_8f64_as_permpd_mask:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovd %eax, %k1
+; X32-NEXT:    vmovapd {{.*#+}} zmm2 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
+; X32-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT:    vmovapd %zmm1, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_8f64_as_permpd_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
+; X64-NEXT:    vmovapd %zmm1, %zmm0
+; X64-NEXT:    retq
   %1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x double> %x1, i8 %m)
   ret <8 x double> %1
 }
 
 define <16 x float> @combine_vpermilvar_16f32_230146759A8BCFDE(<16 x float> %x0) {
-; CHECK-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
+; X64-NEXT:    retq
   %res0 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 0, i32 3, i32 2>, <16 x float> undef, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %res0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 1, i32 0, i32 2, i32 3, i32 0, i32 2, i32 1, i32 1, i32 2, i32 0, i32 3>, <16 x float> undef, i16 -1)
   ret <16 x float> %res1
 }
 
 define <64 x i8> @combine_pshufb_as_pslldq(<64 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pslldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pslldq:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pslldq:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; X64-NEXT:    retq
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, <64 x i8> undef, i64 -1)
   ret <64 x i8> %res0
 }
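
The pslldq/psrldq tests lean on the pshufb zeroing rule: a control byte with its top bit set (i8 128 in the IR) forces the destination byte to zero, so a run of 128s plus a stretch of in-range indices within each 16-byte lane is exactly a per-lane byte shift with zero fill. Per lane:

    dst[i] = (ctrl[i] & 0x80) ? 0 : src[ctrl[i] & 0x0f]

Both targets currently print this as the equivalent zero-padded vpshufb pattern rather than an actual shift instruction.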
 define <64 x i8> @combine_pshufb_as_pslldq_mask(<64 x i8> %a0, i64 %m) {
-; CHECK-LABEL: combine_pshufb_as_pslldq_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovq %rdi, %k1
-; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pslldq_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k0, %k1, %k1
+; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pslldq_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; X64-NEXT:    retq
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, <64 x i8> zeroinitializer, i64 %m)
   ret <64 x i8> %res0
 }
 
 define <64 x i8> @combine_pshufb_as_psrldq(<64 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_as_psrldq:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_psrldq:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_psrldq:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT:    retq
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <64 x i8> undef, i64 -1)
   ret <64 x i8> %res0
 }
 define <64 x i8> @combine_pshufb_as_psrldq_mask(<64 x i8> %a0, i64 %m) {
-; CHECK-LABEL: combine_pshufb_as_psrldq_mask:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    kmovq %rdi, %k1
-; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_psrldq_mask:
+; X32:       # BB#0:
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X32-NEXT:    kunpckdq %k0, %k1, %k1
+; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_psrldq_mask:
+; X64:       # BB#0:
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT:    retq
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <64 x i8> zeroinitializer, i64 %m)
   ret <64 x i8> %res0
 }
 
 define <32 x i16> @combine_permvar_as_pshuflw(<32 x i16> %a0) {
-; CHECK-LABEL: combine_permvar_as_pshuflw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_permvar_as_pshuflw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_permvar_as_pshuflw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; X64-NEXT:    retq
   %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
   ret <32 x i16> %res0
 }
 
 define <32 x i16> @combine_pshufb_as_pshufhw(<32 x i16> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pshufhw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pshufhw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pshufhw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; X64-NEXT:    retq
   %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>, <32 x i16> undef, i32 -1)
   ret <32 x i16> %res0
 }
 
 define <32 x i16> @combine_pshufb_as_pshufw(<32 x i16> %a0) {
-; CHECK-LABEL: combine_pshufb_as_pshufw:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
-; CHECK-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
-; CHECK-NEXT:    retq
+; X32-LABEL: combine_pshufb_as_pshufw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; X32-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pshufw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; X64-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; X64-NEXT:    retq
   %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %res0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>, <32 x i16> undef, i32 -1)
  ret <32 x i16> %res1
 }