[llvm] r251287 - AVX512: Enabled VPBROADCASTB lowering for v64i8 vectors.

Igor Breger via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 26 06:01:02 PDT 2015


Author: ibreger
Date: Mon Oct 26 08:01:02 2015
New Revision: 251287

URL: http://llvm.org/viewvc/llvm-project?rev=251287&view=rev
Log:
AVX512: Enabled VPBROADCASTB lowering for v64i8 vectors.

Differential Revision: http://reviews.llvm.org/D13896
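
The pattern in question is a byte splat across a 512-bit vector. Below is a
minimal IR sketch of what now lowers to a single broadcast, mirroring the new
_invec32xi8 test in the diff (the function name is illustrative, not part of
the patch):

    define <64 x i8> @splat_lane0(<32 x i8> %a) {
      ; Splat element 0 of the 256-bit input across a 512-bit result.
      ; With -mcpu=x86-64 -mattr=+avx512bw, as in the updated RUN lines,
      ; this should select a single: vpbroadcastb %xmm0, %zmm0
      %r = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
      ret <64 x i8> %r
    }

Without AVX512BW the v64i8 type is not legal, so the broadcast stays 256 bits
wide, as the AVX512F checks in the updated test show.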

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=251287&r1=251286&r2=251287&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Oct 26 08:01:02 2015
@@ -1652,6 +1652,7 @@ X86TargetLowering::X86TargetLowering(con
     setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i16, Custom);
     setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i16, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v32i16, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v64i8, Custom);
     setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
     setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i1, Custom);
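
This one-line addition is the enabling mechanism: marking ISD::VECTOR_SHUFFLE
as Custom for MVT::v64i8, alongside the existing v32i16 entry, routes 64-byte
shuffles through X86's custom shuffle lowering (reached via the target's
LowerOperation hook), where a zeroinitializer splat mask can be matched to a
broadcast instruction.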

Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll?rev=251287&r1=251286&r2=251287&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll Mon Oct 26 08:01:02 2015
@@ -1,47 +1,53 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 
 define   <16 x i32> @_inreg16xi32(i32 %a) {
-; CHECK-LABEL: _inreg16xi32:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpbroadcastd %edi, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _inreg16xi32:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpbroadcastd %edi, %zmm0
+; ALL-NEXT:    retq
   %b = insertelement <16 x i32> undef, i32 %a, i32 0
   %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
   ret <16 x i32> %c
 }
 
 define   <8 x i64> @_inreg8xi64(i64 %a) {
-; CHECK-LABEL: _inreg8xi64:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpbroadcastq %rdi, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _inreg8xi64:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
+; ALL-NEXT:    retq
   %b = insertelement <8 x i64> undef, i64 %a, i32 0
   %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
   ret <8 x i64> %c
 }
 
-;CHECK-LABEL: _ss16xfloat_v4
-;CHECK: vbroadcastss %xmm0, %zmm0
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
+; ALL-LABEL: _ss16xfloat_v4:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
+; ALL-NEXT:    retq
   %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
   ret <16 x float> %b
 }
 
 define   <16 x float> @_inreg16xfloat(float %a) {
-; CHECK-LABEL: _inreg16xfloat:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vbroadcastss %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _inreg16xfloat:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
+; ALL-NEXT:    retq
   %b = insertelement <16 x float> undef, float %a, i32 0
   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
   ret <16 x float> %c
 }
 
-;CHECK-LABEL: _ss16xfloat_mask:
-;CHECK: vbroadcastss %xmm0, %zmm1 {%k1}
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
+; ALL-LABEL: _ss16xfloat_mask:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
+; ALL-NEXT:    vmovaps %zmm1, %zmm0
+; ALL-NEXT:    retq
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
   %b = insertelement <16 x float> undef, float %a, i32 0
   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -49,10 +55,13 @@ define   <16 x float> @_ss16xfloat_mask(
   ret <16 x float> %r
 }
 
-;CHECK-LABEL: _ss16xfloat_maskz:
-;CHECK: vbroadcastss %xmm0, %zmm0 {%k1} {z}
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
+; ALL-LABEL: _ss16xfloat_maskz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
+; ALL-NEXT:    retq
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
   %b = insertelement <16 x float> undef, float %a, i32 0
   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -60,20 +69,24 @@ define   <16 x float> @_ss16xfloat_maskz
   ret <16 x float> %r
 }
 
-;CHECK-LABEL: _ss16xfloat_load:
-;CHECK: vbroadcastss (%{{.*}}, %zmm
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_load(float* %a.ptr) {
+; ALL-LABEL: _ss16xfloat_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastss (%rdi), %zmm0
+; ALL-NEXT:    retq
   %a = load float, float* %a.ptr
   %b = insertelement <16 x float> undef, float %a, i32 0
   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
   ret <16 x float> %c
 }
 
-;CHECK-LABEL: _ss16xfloat_mask_load:
-;CHECK: vbroadcastss (%rdi), %zmm0 {%k1}
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
+; ALL-LABEL: _ss16xfloat_mask_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
+; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1}
+; ALL-NEXT:    retq
   %a = load float, float* %a.ptr
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
   %b = insertelement <16 x float> undef, float %a, i32 0
@@ -82,10 +95,13 @@ define   <16 x float> @_ss16xfloat_mask_
   ret <16 x float> %r
 }
 
-;CHECK-LABEL: _ss16xfloat_maskz_load:
-;CHECK: vbroadcastss (%rdi), %zmm0 {%k1} {z}
-;CHECK: ret
 define   <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
+; ALL-LABEL: _ss16xfloat_maskz_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
+; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z}
+; ALL-NEXT:    retq
   %a = load float, float* %a.ptr
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
   %b = insertelement <16 x float> undef, float %a, i32 0
@@ -95,19 +111,23 @@ define   <16 x float> @_ss16xfloat_maskz
 }
 
 define   <8 x double> @_inreg8xdouble(double %a) {
-; CHECK-LABEL: _inreg8xdouble:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _inreg8xdouble:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT:    retq
   %b = insertelement <8 x double> undef, double %a, i32 0
   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
   ret <8 x double> %c
 }
 
-;CHECK-LABEL: _sd8xdouble_mask:
-;CHECK: vbroadcastsd %xmm0, %zmm1 {%k1}
-;CHECK: ret
 define   <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
+; ALL-LABEL: _sd8xdouble_mask:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
+; ALL-NEXT:    vmovaps %zmm1, %zmm0
+; ALL-NEXT:    retq
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %b = insertelement <8 x double> undef, double %a, i32 0
   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
@@ -115,10 +135,13 @@ define   <8 x double> @_sd8xdouble_mask(
   ret <8 x double> %r
 }
 
-;CHECK-LABEL: _sd8xdouble_maskz:
-;CHECK: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
-;CHECK: ret
 define   <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
+; ALL-LABEL: _sd8xdouble_maskz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
+; ALL-NEXT:    retq
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %b = insertelement <8 x double> undef, double %a, i32 0
   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
@@ -126,20 +149,24 @@ define   <8 x double> @_sd8xdouble_maskz
   ret <8 x double> %r
 }
 
-;CHECK-LABEL: _sd8xdouble_load:
-;CHECK: vbroadcastsd (%rdi), %zmm
-;CHECK: ret
 define   <8 x double> @_sd8xdouble_load(double* %a.ptr) {
+; ALL-LABEL: _sd8xdouble_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0
+; ALL-NEXT:    retq
   %a = load double, double* %a.ptr
   %b = insertelement <8 x double> undef, double %a, i32 0
   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
   ret <8 x double> %c
 }
 
-;CHECK-LABEL: _sd8xdouble_mask_load:
-;CHECK: vbroadcastsd (%rdi), %zmm0 {%k1}
-;CHECK: ret
 define   <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
+; ALL-LABEL: _sd8xdouble_mask_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
+; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1}
+; ALL-NEXT:    retq
   %a = load double, double* %a.ptr
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %b = insertelement <8 x double> undef, double %a, i32 0
@@ -149,9 +176,12 @@ define   <8 x double> @_sd8xdouble_mask_
 }
 
 define   <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
-; CHECK-LABEL: _sd8xdouble_maskz_load:
-; CHECK:    vbroadcastsd (%rdi), %zmm0 {%k1} {z}
-; CHECK:    ret
+; ALL-LABEL: _sd8xdouble_maskz_load:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
+; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} {z}
+; ALL-NEXT:    retq
   %a = load double, double* %a.ptr
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %b = insertelement <8 x double> undef, double %a, i32 0
@@ -161,32 +191,32 @@ define   <8 x double> @_sd8xdouble_maskz
 }
 
 define   <16 x i32> @_xmm16xi32(<16 x i32> %a) {
-; CHECK-LABEL: _xmm16xi32:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _xmm16xi32:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpbroadcastd %xmm0, %zmm0
+; ALL-NEXT:    retq
   %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
   ret <16 x i32> %b
 }
 
 define   <16 x float> @_xmm16xfloat(<16 x float> %a) {
-; CHECK-LABEL: _xmm16xfloat:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vbroadcastss %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: _xmm16xfloat:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
+; ALL-NEXT:    retq
   %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
   ret <16 x float> %b
 }
 
 define <16 x i32> @test_vbroadcast() {
-; CHECK-LABEL: test_vbroadcast:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
-; CHECK-NEXT:    vcmpunordps %zmm0, %zmm0, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; CHECK-NEXT:    knotw %k1, %k1
-; CHECK-NEXT:    vmovdqu32 %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_vbroadcast:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; ALL-NEXT:    vcmpunordps %zmm0, %zmm0, %k1
+; ALL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; ALL-NEXT:    knotw %k1, %k1
+; ALL-NEXT:    vmovdqu32 %zmm0, %zmm0 {%k1} {z}
+; ALL-NEXT:    retq
 entry:
   %0 = sext <16 x i1> zeroinitializer to <16 x i32>
   %1 = fcmp uno <16 x float> undef, zeroinitializer
@@ -198,10 +228,10 @@ entry:
 ; We implement the set1 intrinsics with vector initializers.  Verify that the
 ; IR generated will produce broadcasts at the end.
 define <8 x double> @test_set1_pd(double %d) #2 {
-; CHECK-LABEL: test_set1_pd:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_set1_pd:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT:    retq
 entry:
   %vecinit.i = insertelement <8 x double> undef, double %d, i32 0
   %vecinit1.i = insertelement <8 x double> %vecinit.i, double %d, i32 1
@@ -215,10 +245,10 @@ entry:
 }
 
 define <8 x i64> @test_set1_epi64(i64 %d) #2 {
-; CHECK-LABEL: test_set1_epi64:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vpbroadcastq %rdi, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_set1_epi64:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
+; ALL-NEXT:    retq
 entry:
   %vecinit.i = insertelement <8 x i64> undef, i64 %d, i32 0
   %vecinit1.i = insertelement <8 x i64> %vecinit.i, i64 %d, i32 1
@@ -232,10 +262,10 @@ entry:
 }
 
 define <16 x float> @test_set1_ps(float %f) #2 {
-; CHECK-LABEL: test_set1_ps:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vbroadcastss %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_set1_ps:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
+; ALL-NEXT:    retq
 entry:
   %vecinit.i = insertelement <16 x float> undef, float %f, i32 0
   %vecinit1.i = insertelement <16 x float> %vecinit.i, float %f, i32 1
@@ -257,10 +287,10 @@ entry:
 }
 
 define <16 x i32> @test_set1_epi32(i32 %f) #2 {
-; CHECK-LABEL: test_set1_epi32:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vpbroadcastd %edi, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_set1_epi32:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vpbroadcastd %edi, %zmm0
+; ALL-NEXT:    retq
 entry:
   %vecinit.i = insertelement <16 x i32> undef, i32 %f, i32 0
   %vecinit1.i = insertelement <16 x i32> %vecinit.i, i32 %f, i32 1
@@ -284,10 +314,10 @@ entry:
 ; We implement the scalar broadcast intrinsics with vector initializers.
 ; Verify that the IR generated will produce the broadcast at the end.
 define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
-; CHECK-LABEL: test_mm512_broadcastsd_pd:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm0
-; CHECK-NEXT:    retq
+; ALL-LABEL: test_mm512_broadcastsd_pd:
+; ALL:       # BB#0: # %entry
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT:    retq
 entry:
   %0 = extractelement <2 x double> %a, i32 0
   %vecinit.i = insertelement <8 x double> undef, double %0, i32 0
@@ -301,30 +331,69 @@ entry:
   ret <8 x double> %vecinit7.i
 }
 
-; CHECK-LABEL: test1
-; CHECK: vbroadcastss
 define <16 x float> @test1(<8 x float>%a)  {
+; ALL-LABEL: test1:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
+; ALL-NEXT:    retq
   %res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
   ret <16 x float>%res
 }
 
-; CHECK-LABEL: test2
-; CHECK: vbroadcastsd
 define <8 x double> @test2(<4 x double>%a)  {
+; ALL-LABEL: test2:
+; ALL:       # BB#0:
+; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT:    retq
   %res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
   ret <8 x double>%res
 }
 
-; CHECK-LABEL: test3
-; CHECK: vpbroadcastd
-define <16 x i32> @test3(<8 x i32>%a)  {
+define <64 x i8> @_invec32xi8(<32 x i8>%a)  {
+; AVX512F-LABEL: _invec32xi8:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpbroadcastb %xmm0, %ymm0
+; AVX512F-NEXT:    vmovaps %zmm0, %zmm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: _invec32xi8:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
+  ret <64 x i8>%res
+}
+
+define <32 x i16> @_invec16xi16(<16 x i16>%a)  {
+; AVX512F-LABEL: _invec16xi16:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpbroadcastw %xmm0, %ymm0
+; AVX512F-NEXT:    vmovaps %zmm0, %zmm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: _invec16xi16:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpbroadcastw %xmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
+  ret <32 x i16>%res
+}
+
+define <16 x i32> @_invec8xi32(<8 x i32>%a)  {
+; ALL-LABEL: _invec8xi32:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpbroadcastd %xmm0, %zmm0
+; ALL-NEXT:    retq
   %res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
   ret <16 x i32>%res
 }
 
-; CHECK-LABEL: test4
-; CHECK: vpbroadcastq
-define <8 x i64> @test4(<4 x i64>%a)  {
+define <8 x i64> @_invec4xi64(<4 x i64>%a)  {
+; ALL-LABEL: _invec4xi64:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpbroadcastq %xmm0, %zmm0
+; ALL-NEXT:    retq
   %res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
   ret <8 x i64>%res
 }
+
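
A note on the expectations in the new byte and word tests: with +avx512f
alone, v64i8 and v32i16 are not legal types, so the splat is legalized at 256
bits (a ymm-wide vpbroadcastb/vpbroadcastw plus a register copy so both
returned halves hold the splat), while +avx512bw makes the full 512-bit types
legal and a single zmm broadcast suffices.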