[llvm] r274537 - [X86][AVX512] Added BROADCAST intrinsics fast-isel generic IR tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 5 03:15:14 PDT 2016


Author: rksimon
Date: Tue Jul  5 05:15:14 2016
New Revision: 274537

URL: http://llvm.org/viewvc/llvm-project?rev=274537&view=rev
Log:
[X86][AVX512] Added BROADCAST intrinsics fast-isel generic IR tests
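
The new tests mirror the generic IR that clang emits for the broadcast
builtins: the splat itself is a shufflevector with an all-zero mask, and
the mask/maskz variants wrap it in a bitcast of the scalar mask to
<N x i1> followed by a select. An illustrative sketch of the pattern
(not part of the patch; the function name is invented):

define <16 x i32> @sketch_mask_broadcastd(<16 x i32> %passthru, i16 %mask, <4 x i32> %v) {
  ; Splat element 0 of %v across all 16 lanes.
  %bcast = shufflevector <4 x i32> %v, <4 x i32> undef, <16 x i32> zeroinitializer
  ; Reinterpret the 16-bit mask as one i1 per lane.
  %m = bitcast i16 %mask to <16 x i1>
  ; Selected lanes take the broadcast; the rest keep the pass-through value.
  %res = select <16 x i1> %m, <16 x i32> %bcast, <16 x i32> %passthru
  ret <16 x i32> %res
}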

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=274537&r1=274536&r2=274537&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Tue Jul  5 05:15:14 2016
@@ -4,6 +4,221 @@
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
 
+define <8 x i64> @test_mm512_broadcastd_epi32(<2 x i64> %a0) {
+; X32-LABEL: test_mm512_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastd %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastd %xmm0, %zmm0
+; X64-NEXT:    retq
+  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <16 x i32> zeroinitializer
+  %res1 = bitcast <16 x i32> %res0 to <8 x i64>
+  ret <8 x i64> %res1
+}
+
+define <8 x i64> @test_mm512_mask_broadcastd_epi32(<8 x i64> %a0, i16 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm512_mask_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastd %xmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+  %arg0 = bitcast <8 x i64> %a0 to <16 x i32>
+  %arg1 = bitcast i16 %a1 to <16 x i1>
+  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <16 x i32> zeroinitializer
+  %res1 = select <16 x i1> %arg1, <16 x i32> %res0, <16 x i32> %arg0
+  %res2 = bitcast <16 x i32> %res1 to <8 x i64>
+  ret <8 x i64> %res2
+}
+
+define <8 x i64> @test_mm512_maskz_broadcastd_epi32(i16 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm512_maskz_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i16 %a0 to <16 x i1>
+  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg1, <4 x i32> undef, <16 x i32> zeroinitializer
+  %res1 = select <16 x i1> %arg0, <16 x i32> %res0, <16 x i32> zeroinitializer
+  %res2 = bitcast <16 x i32> %res1 to <8 x i64>
+  ret <8 x i64> %res2
+}
+
+define <8 x i64> @test_mm512_broadcastq_epi64(<2 x i64> %a0) {
+; X32-LABEL: test_mm512_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastq %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastq %xmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <8 x i32> zeroinitializer
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @test_mm512_mask_broadcastq_epi64(<8 x i64> %a0, i8 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm512_mask_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastq %xmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <2 x i64> %a2, <2 x i64> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg1, <8 x i64> %res0, <8 x i64> %a0
+  ret <8 x i64> %res1
+}
+
+define <8 x i64> @test_mm512_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm512_maskz_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm0, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastq %xmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <2 x i64> %a1, <2 x i64> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg0, <8 x i64> %res0, <8 x i64> zeroinitializer
+  ret <8 x i64> %res1
+}
+
+define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a0) {
+; X32-LABEL: test_mm512_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastsd %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastsd %xmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x double> %a0, <2 x double> undef, <8 x i32> zeroinitializer
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mm512_mask_broadcastsd_pd(<8 x double> %a0, i8 %a1, <2 x double> %a2) {
+; X32-LABEL: test_mm512_mask_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastsd %xmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastsd %xmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <2 x double> %a2, <2 x double> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg1, <8 x double> %res0, <8 x double> %a0
+  ret <8 x double> %res1
+}
+
+define <8 x double> @test_mm512_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
+; X32-LABEL: test_mm512_maskz_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <2 x double> %a1, <2 x double> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg0, <8 x double> %res0, <8 x double> zeroinitializer
+  ret <8 x double> %res1
+}
+
+define <16 x float> @test_mm512_broadcastss_ps(<4 x float> %a0) {
+; X32-LABEL: test_mm512_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x float> %a0, <4 x float> undef, <16 x i32> zeroinitializer
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mm512_mask_broadcastss_ps(<16 x float> %a0, i16 %a1, <4 x float> %a2) {
+; X32-LABEL: test_mm512_mask_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm1, %zmm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastss %xmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+  %arg1 = bitcast i16 %a1 to <16 x i1>
+  %res0 = shufflevector <4 x float> %a2, <4 x float> undef, <16 x i32> zeroinitializer
+  %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
+  ret <16 x float> %res1
+}
+
+define <16 x float> @test_mm512_maskz_broadcastss_ps(i16 %a0, <4 x float> %a1) {
+; X32-LABEL: test_mm512_maskz_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i16 %a0 to <16 x i1>
+  %res0 = shufflevector <4 x float> %a1, <4 x float> undef, <16 x i32> zeroinitializer
+  %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
+  ret <16 x float> %res1
+}
+
 define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
 ; X32-LABEL: test_mm512_movddup_pd:
 ; X32:       # BB#0:

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll?rev=274537&r1=274536&r2=274537&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll Tue Jul  5 05:15:14 2016
@@ -4,6 +4,568 @@
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vl-builtins.c
 
+define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
+; X32-LABEL: test_mm_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastd %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastd %xmm0, %xmm0
+; X64-NEXT:    retq
+  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res1 = bitcast <4 x i32> %res0 to <2 x i64>
+  ret <2 x i64> %res1
+}
+
+define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm_mask_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp0:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm1, %xmm0 {%k1}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastd %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg1, <4 x i32> %res0, <4 x i32> %arg0
+  %res2 = bitcast <4 x i32> %res1 to <2 x i64>
+  ret <2 x i64> %res2
+}
+
+define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm_maskz_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp1:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastd %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn0 to <4 x i1>
+  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg1, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg0, <4 x i32> %res0, <4 x i32> zeroinitializer
+  %res2 = bitcast <4 x i32> %res1 to <2 x i64>
+  ret <2 x i64> %res2
+}
+
+define <4 x i64> @test_mm256_broadcastd_epi32(<2 x i64> %a0) {
+; X32-LABEL: test_mm256_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastd %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastd %xmm0, %ymm0
+; X64-NEXT:    retq
+  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <8 x i32> zeroinitializer
+  %res1 = bitcast <8 x i32> %res0 to <4 x i64>
+  ret <4 x i64> %res1
+}
+
+define <4 x i64> @test_mm256_mask_broadcastd_epi32(<4 x i64> %a0, i8 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm256_mask_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastd %xmm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg1, <8 x i32> %res0, <8 x i32> %arg0
+  %res2 = bitcast <8 x i32> %res1 to <4 x i64>
+  ret <4 x i64> %res2
+}
+
+define <4 x i64> @test_mm256_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm256_maskz_broadcastd_epi32:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastd %xmm0, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_broadcastd_epi32:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpbroadcastd %xmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
+  %res0 = shufflevector <4 x i32> %arg1, <4 x i32> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg0, <8 x i32> %res0, <8 x i32> zeroinitializer
+  %res2 = bitcast <8 x i32> %res1 to <4 x i64>
+  ret <4 x i64> %res2
+}
+
+define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
+; X32-LABEL: test_mm_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastq %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastq %xmm0, %xmm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm_mask_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp2:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $3, %al
+; X32-NEXT:    movb %al, {{[0-9]+}}(%esp)
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm1, %xmm0 {%k1}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $3, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastq %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i2
+  %arg1 = bitcast i2 %trn1 to <2 x i1>
+  %res0 = shufflevector <2 x i64> %a2, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res1 = select <2 x i1> %arg1, <2 x i64> %res0, <2 x i64> %a0
+  ret <2 x i64> %res1
+}
+
+define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm_maskz_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp3:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $3, %al
+; X32-NEXT:    movb %al, {{[0-9]+}}(%esp)
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $3, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastq %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i2
+  %arg0 = bitcast i2 %trn0 to <2 x i1>
+  %res0 = shufflevector <2 x i64> %a1, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res1 = select <2 x i1> %arg0, <2 x i64> %res0, <2 x i64> zeroinitializer
+  ret <2 x i64> %res1
+}
+
+define <4 x i64> @test_mm256_broadcastq_epi64(<2 x i64> %a0) {
+; X32-LABEL: test_mm256_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    vpbroadcastq %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    vpbroadcastq %xmm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> zeroinitializer
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i64> %a2) {
+; X32-LABEL: test_mm256_mask_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp4:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm1, %ymm0 {%k1}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastq %xmm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <2 x i64> %a2, <2 x i64> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg1, <4 x i64> %res0, <4 x i64> %a0
+  ret <4 x i64> %res1
+}
+
+define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm256_maskz_broadcastq_epi64:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp5:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vpbroadcastq %xmm0, %ymm0 {%k1} {z}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_broadcastq_epi64:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vpbroadcastq %xmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn0 to <4 x i1>
+  %res0 = shufflevector <2 x i64> %a1, <2 x i64> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg0, <4 x i64> %res0, <4 x i64> zeroinitializer
+  ret <4 x i64> %res1
+}
+
+define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
+; X32-LABEL: test_mm_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT:    retq
+  %res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x double> %a2) {
+; X32-LABEL: test_mm_mask_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp6:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $3, %al
+; X32-NEXT:    movb %al, {{[0-9]+}}(%esp)
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $3, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i2
+  %arg1 = bitcast i2 %trn1 to <2 x i1>
+  %res0 = shufflevector <2 x double> %a2, <2 x double> undef, <2 x i32> zeroinitializer
+  %res1 = select <2 x i1> %arg1, <2 x double> %res0, <2 x double> %a0
+  ret <2 x double> %res1
+}
+
+define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
+; X32-LABEL: test_mm_maskz_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp7:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $3, %al
+; X32-NEXT:    movb %al, {{[0-9]+}}(%esp)
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $3, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i2
+  %arg0 = bitcast i2 %trn0 to <2 x i1>
+  %res0 = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
+  %res1 = select <2 x i1> %arg0, <2 x double> %res0, <2 x double> zeroinitializer
+  ret <2 x double> %res1
+}
+
+define <4 x double> @test_mm256_broadcastsd_pd(<2 x double> %a0) {
+; X32-LABEL: test_mm256_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 x double> %a2) {
+; X32-LABEL: test_mm256_mask_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp8:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastsd %xmm1, %ymm0 {%k1}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vbroadcastsd %xmm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <2 x double> %a2, <2 x double> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg1, <4 x double> %res0, <4 x double> %a0
+  ret <4 x double> %res1
+}
+
+define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
+; X32-LABEL: test_mm256_maskz_broadcastsd_pd:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp9:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastsd %xmm0, %ymm0 {%k1} {z}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_broadcastsd_pd:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vbroadcastsd %xmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn0 to <4 x i1>
+  %res0 = shufflevector <2 x double> %a1, <2 x double> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg0, <4 x double> %res0, <4 x double> zeroinitializer
+  ret <4 x double> %res1
+}
+
+define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
+; X32-LABEL: test_mm_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %xmm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x float> %a2) {
+; X32-LABEL: test_mm_mask_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp10:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm1, %xmm0 {%k1}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vbroadcastss %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+  %trn1 = trunc i8 %a1 to i4
+  %arg1 = bitcast i4 %trn1 to <4 x i1>
+  %res0 = shufflevector <4 x float> %a2, <4 x float> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg1, <4 x float> %res0, <4 x float> %a0
+  ret <4 x float> %res1
+}
+
+define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
+; X32-LABEL: test_mm_maskz_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %eax
+; X32-NEXT:  .Ltmp11:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    andb $15, %al
+; X32-NEXT:    movb %al, (%esp)
+; X32-NEXT:    movzbl (%esp), %eax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm0, %xmm0 {%k1} {z}
+; X32-NEXT:    popl %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    andb $15, %dil
+; X64-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vbroadcastss %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+  %trn0 = trunc i8 %a0 to i4
+  %arg0 = bitcast i4 %trn0 to <4 x i1>
+  %res0 = shufflevector <4 x float> %a1, <4 x float> undef, <4 x i32> zeroinitializer
+  %res1 = select <4 x i1> %arg0, <4 x float> %res0, <4 x float> zeroinitializer
+  ret <4 x float> %res1
+}
+
+define <8 x float> @test_mm256_broadcastss_ps(<4 x float> %a0) {
+; X32-LABEL: test_mm256_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vbroadcastss %xmm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vbroadcastss %xmm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> zeroinitializer
+  ret <8 x float> %res
+}
+
+define <8 x float> @test_mm256_mask_broadcastss_ps(<8 x float> %a0, i8 %a1, <4 x float> %a2) {
+; X32-LABEL: test_mm256_mask_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <4 x float> %a2, <4 x float> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg1, <8 x float> %res0, <8 x float> %a0
+  ret <8 x float> %res1
+}
+
+define <8 x float> @test_mm256_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
+; X32-LABEL: test_mm256_maskz_broadcastss_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_broadcastss_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> zeroinitializer
+  %res1 = select <8 x i1> %arg0, <8 x float> %res0, <8 x float> zeroinitializer
+  ret <8 x float> %res1
+}
+
 define <2 x double> @test_mm_movddup_pd(<2 x double> %a0) {
 ; X32-LABEL: test_mm_movddup_pd:
 ; X32:       # BB#0:
@@ -22,7 +584,7 @@ define <2 x double> @test_mm_mask_movddu
 ; X32-LABEL: test_mm_mask_movddup_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp0:
+; X32-NEXT:  .Ltmp12:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $3, %al
@@ -52,7 +614,7 @@ define <2 x double> @test_mm_maskz_movdd
 ; X32-LABEL: test_mm_maskz_movddup_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp1:
+; X32-NEXT:  .Ltmp13:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $3, %al
@@ -96,7 +658,7 @@ define <4 x double> @test_mm256_mask_mov
 ; X32-LABEL: test_mm256_mask_movddup_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp2:
+; X32-NEXT:  .Ltmp14:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -126,7 +688,7 @@ define <4 x double> @test_mm256_maskz_mo
 ; X32-LABEL: test_mm256_maskz_movddup_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp3:
+; X32-NEXT:  .Ltmp15:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -170,7 +732,7 @@ define <4 x float> @test_mm_mask_movehdu
 ; X32-LABEL: test_mm_mask_movehdup_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp4:
+; X32-NEXT:  .Ltmp16:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -200,7 +762,7 @@ define <4 x float> @test_mm_maskz_movehd
 ; X32-LABEL: test_mm_maskz_movehdup_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp5:
+; X32-NEXT:  .Ltmp17:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -296,7 +858,7 @@ define <4 x float> @test_mm_mask_moveldu
 ; X32-LABEL: test_mm_mask_moveldup_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp6:
+; X32-NEXT:  .Ltmp18:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -326,7 +888,7 @@ define <4 x float> @test_mm_maskz_moveld
 ; X32-LABEL: test_mm_maskz_moveldup_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp7:
+; X32-NEXT:  .Ltmp19:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -422,7 +984,7 @@ define <4 x i64> @test_mm256_mask_permut
 ; X32-LABEL: test_mm256_mask_permutex_epi64:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp8:
+; X32-NEXT:  .Ltmp20:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -452,7 +1014,7 @@ define <4 x i64> @test_mm256_maskz_permu
 ; X32-LABEL: test_mm256_maskz_permutex_epi64:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp9:
+; X32-NEXT:  .Ltmp21:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -496,7 +1058,7 @@ define <4 x double> @test_mm256_mask_per
 ; X32-LABEL: test_mm256_mask_permutex_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp10:
+; X32-NEXT:  .Ltmp22:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -526,7 +1088,7 @@ define <4 x double> @test_mm256_maskz_pe
 ; X32-LABEL: test_mm256_maskz_permutex_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp11:
+; X32-NEXT:  .Ltmp23:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -570,7 +1132,7 @@ define <2 x double> @test_mm_mask_shuffl
 ; X32-LABEL: test_mm_mask_shuffle_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp12:
+; X32-NEXT:  .Ltmp24:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $3, %al
@@ -600,7 +1162,7 @@ define <2 x double> @test_mm_maskz_shuff
 ; X32-LABEL: test_mm_maskz_shuffle_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp13:
+; X32-NEXT:  .Ltmp25:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $3, %al
@@ -644,7 +1206,7 @@ define <4 x double> @test_mm256_mask_shu
 ; X32-LABEL: test_mm256_mask_shuffle_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp14:
+; X32-NEXT:  .Ltmp26:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -674,7 +1236,7 @@ define <4 x double> @test_mm256_maskz_sh
 ; X32-LABEL: test_mm256_maskz_shuffle_pd:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp15:
+; X32-NEXT:  .Ltmp27:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -718,7 +1280,7 @@ define <4 x float> @test_mm_mask_shuffle
 ; X32-LABEL: test_mm_mask_shuffle_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp16:
+; X32-NEXT:  .Ltmp28:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al
@@ -748,7 +1310,7 @@ define <4 x float> @test_mm_maskz_shuffl
 ; X32-LABEL: test_mm_maskz_shuffle_ps:
 ; X32:       # BB#0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:  .Ltmp17:
+; X32-NEXT:  .Ltmp29:
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $15, %al

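For the 128/256-bit VL variants, where fewer than 8 mask bits are
meaningful, the tests first truncate the i8 mask before the bitcast,
which accounts for the andb/movzbl sequences in the checked assembly. A
simplified sketch (invented name; the real tests additionally bitcast
through <2 x i64> to model __m128i):

define <4 x i32> @sketch_maskz_broadcastd_128(i8 %mask, <4 x i32> %v) {
  %bcast = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
  ; Only the low 4 mask bits apply to a 4-lane operation.
  %t = trunc i8 %mask to i4
  %m = bitcast i4 %t to <4 x i1>
  ; maskz: unselected lanes are zeroed rather than passed through.
  %res = select <4 x i1> %m, <4 x i32> %bcast, <4 x i32> zeroinitializer
  ret <4 x i32> %res
}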