[llvm] c89b3af - [X86] Pre-commit test cases for D87863. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 20 15:14:55 PDT 2020


Author: Craig Topper
Date: 2020-09-20T13:53:05-07:00
New Revision: c89b3af0e3e970820ed35798ab2516459a8d829d

URL: https://github.com/llvm/llvm-project/commit/c89b3af0e3e970820ed35798ab2516459a8d829d
DIFF: https://github.com/llvm/llvm-project/commit/c89b3af0e3e970820ed35798ab2516459a8d829d.diff

LOG: [X86] Pre-commit test cases for D87863. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/masked_load.ll
    llvm/test/CodeGen/X86/masked_store.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
index 2d2fb44cdfbd..44938d029aed 100644
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -7175,36 +7175,138 @@ define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %v
   ret <8 x double> %res
 }
 
+define <16 x i64> @load_one_mask_bit_set6(<16 x i64>* %addr, <16 x i64> %val) {
+; SSE2-LABEL: load_one_mask_bit_set6:
+; SSE2:       ## %bb.0:
+; SSE2-NEXT:    movq %rdi, %rax
+; SSE2-NEXT:    movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; SSE2-NEXT:    movlps {{.*#+}} xmm5 = mem[0,1],xmm5[2,3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm8 = mem[0],zero
+; SSE2-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm8[0]
+; SSE2-NEXT:    movaps %xmm7, 112(%rdi)
+; SSE2-NEXT:    movaps %xmm4, 64(%rdi)
+; SSE2-NEXT:    movaps %xmm3, 48(%rdi)
+; SSE2-NEXT:    movaps %xmm2, 32(%rdi)
+; SSE2-NEXT:    movaps %xmm0, (%rdi)
+; SSE2-NEXT:    movaps %xmm5, 80(%rdi)
+; SSE2-NEXT:    movaps %xmm1, 16(%rdi)
+; SSE2-NEXT:    movaps %xmm6, 96(%rdi)
+; SSE2-NEXT:    retq
+;
+; SSE42-LABEL: load_one_mask_bit_set6:
+; SSE42:       ## %bb.0:
+; SSE42-NEXT:    movq %rdi, %rax
+; SSE42-NEXT:    pinsrq $0, 16(%rsi), %xmm1
+; SSE42-NEXT:    pinsrq $0, 80(%rsi), %xmm5
+; SSE42-NEXT:    pinsrq $1, 104(%rsi), %xmm6
+; SSE42-NEXT:    movaps %xmm7, 112(%rdi)
+; SSE42-NEXT:    movaps %xmm4, 64(%rdi)
+; SSE42-NEXT:    movaps %xmm3, 48(%rdi)
+; SSE42-NEXT:    movaps %xmm2, 32(%rdi)
+; SSE42-NEXT:    movaps %xmm0, (%rdi)
+; SSE42-NEXT:    movdqa %xmm6, 96(%rdi)
+; SSE42-NEXT:    movdqa %xmm5, 80(%rdi)
+; SSE42-NEXT:    movdqa %xmm1, 16(%rdi)
+; SSE42-NEXT:    retq
+;
+; AVX1-LABEL: load_one_mask_bit_set6:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [0,0,18446744073709551615,0]
+; AVX1-NEXT:    vmaskmovpd (%rdi), %ymm4, %ymm5
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3]
+; AVX1-NEXT:    vmaskmovpd 64(%rdi), %ymm4, %ymm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3]
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [0,18446744073709551615,0,0]
+; AVX1-NEXT:    vmaskmovpd 96(%rdi), %ymm4, %ymm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: load_one_mask_bit_set6:
+; AVX2:       ## %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,0,18446744073709551615,0]
+; AVX2-NEXT:    vpmaskmovq (%rdi), %ymm4, %ymm5
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
+; AVX2-NEXT:    vpmaskmovq 64(%rdi), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,18446744073709551615,0,0]
+; AVX2-NEXT:    vpmaskmovq 96(%rdi), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: load_one_mask_bit_set6:
+; AVX512F:       ## %bb.0:
+; AVX512F-NEXT:    movb $4, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1}
+; AVX512F-NEXT:    movb $36, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqu64 64(%rdi), %zmm1 {%k1}
+; AVX512F-NEXT:    retq
+;
+; AVX512VLDQ-LABEL: load_one_mask_bit_set6:
+; AVX512VLDQ:       ## %bb.0:
+; AVX512VLDQ-NEXT:    movb $4, %al
+; AVX512VLDQ-NEXT:    kmovw %eax, %k1
+; AVX512VLDQ-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1}
+; AVX512VLDQ-NEXT:    movb $36, %al
+; AVX512VLDQ-NEXT:    kmovw %eax, %k1
+; AVX512VLDQ-NEXT:    vmovdqu64 64(%rdi), %zmm1 {%k1}
+; AVX512VLDQ-NEXT:    retq
+;
+; AVX512VLBW-LABEL: load_one_mask_bit_set6:
+; AVX512VLBW:       ## %bb.0:
+; AVX512VLBW-NEXT:    movb $4, %al
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1}
+; AVX512VLBW-NEXT:    movb $36, %al
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu64 64(%rdi), %zmm1 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; X86-AVX512-LABEL: load_one_mask_bit_set6:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movb $4, %cl
+; X86-AVX512-NEXT:    kmovd %ecx, %k1
+; X86-AVX512-NEXT:    vmovdqu64 (%eax), %zmm0 {%k1}
+; X86-AVX512-NEXT:    movb $36, %cl
+; X86-AVX512-NEXT:    kmovd %ecx, %k1
+; X86-AVX512-NEXT:    vmovdqu64 64(%eax), %zmm1 {%k1}
+; X86-AVX512-NEXT:    retl
+  %res = call <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>* %addr, i32 4, <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false>, <16 x i64> %val)
+  ret <16 x i64> %res
+}
+
 define i32 @pr38986(i1 %c, i32* %p) {
 ; SSE-LABEL: pr38986:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    testb $1, %dil
 ; SSE-NEXT:    ## implicit-def: $eax
-; SSE-NEXT:    je LBB44_2
+; SSE-NEXT:    je LBB45_2
 ; SSE-NEXT:  ## %bb.1: ## %cond.load
 ; SSE-NEXT:    movl (%rsi), %eax
-; SSE-NEXT:  LBB44_2: ## %else
+; SSE-NEXT:  LBB45_2: ## %else
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: pr38986:
 ; AVX:       ## %bb.0:
 ; AVX-NEXT:    testb $1, %dil
 ; AVX-NEXT:    ## implicit-def: $eax
-; AVX-NEXT:    je LBB44_2
+; AVX-NEXT:    je LBB45_2
 ; AVX-NEXT:  ## %bb.1: ## %cond.load
 ; AVX-NEXT:    movl (%rsi), %eax
-; AVX-NEXT:  LBB44_2: ## %else
+; AVX-NEXT:  LBB45_2: ## %else
 ; AVX-NEXT:    retq
 ;
 ; X86-AVX512-LABEL: pr38986:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; X86-AVX512-NEXT:    ## implicit-def: $eax
-; X86-AVX512-NEXT:    je LBB44_2
+; X86-AVX512-NEXT:    je LBB45_2
 ; X86-AVX512-NEXT:  ## %bb.1: ## %cond.load
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512-NEXT:    movl (%eax), %eax
-; X86-AVX512-NEXT:  LBB44_2: ## %else
+; X86-AVX512-NEXT:  LBB45_2: ## %else
 ; X86-AVX512-NEXT:    retl
  %vc = insertelement <1 x i1> undef, i1 %c, i32 0
  %vp = bitcast i32* %p to <1 x i32>*
@@ -7240,6 +7342,7 @@ declare <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>*, i32, <8 x i1>,
 declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
 declare <2 x float> @llvm.masked.load.v2f32.p0v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
 
+declare <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>*, i32, <16 x i1>, <16 x i64>)
 declare <8 x i64> @llvm.masked.load.v8i64.p0v8i64(<8 x i64>*, i32, <8 x i1>, <8 x i64>)
 declare <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>*, i32, <4 x i1>, <4 x i64>)
 declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)

diff  --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 417463226063..319d946794b7 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -4660,100 +4660,118 @@ define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr,
 
 ; Make sure we are able to detect all ones constant mask after type legalization
 ; to avoid masked stores.
-define void @mstore_constmask_allones_split(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
+define void @mstore_constmask_allones_split(<16 x i64> %trigger, <16 x i64>* %addr, <16 x i64> %val) {
 ; SSE2-LABEL: mstore_constmask_allones_split:
 ; SSE2:       ## %bb.0:
-; SSE2-NEXT:    movd %xmm4, (%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
-; SSE2-NEXT:    movd %xmm0, 4(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
-; SSE2-NEXT:    movd %xmm0, 12(%rdi)
-; SSE2-NEXT:    movd %xmm5, 16(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE2-NEXT:    movd %xmm0, 24(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
-; SSE2-NEXT:    movd %xmm0, 28(%rdi)
-; SSE2-NEXT:    movd %xmm6, 32(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
-; SSE2-NEXT:    movd %xmm0, 36(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE2-NEXT:    movd %xmm0, 40(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
-; SSE2-NEXT:    movd %xmm0, 44(%rdi)
-; SSE2-NEXT:    movd %xmm7, 48(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
-; SSE2-NEXT:    movd %xmm0, 52(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE2-NEXT:    movd %xmm0, 56(%rdi)
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
-; SSE2-NEXT:    movd %xmm0, 60(%rdi)
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm0
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm1
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm2
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm3
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm4
+; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm5
+; SSE2-NEXT:    movq %xmm5, (%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; SSE2-NEXT:    movq %xmm5, 8(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = mem[2,3,2,3]
+; SSE2-NEXT:    movq %xmm5, 24(%rdi)
+; SSE2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movq %rax, 32(%rdi)
+; SSE2-NEXT:    movq %xmm4, 48(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE2-NEXT:    movq %xmm4, 56(%rdi)
+; SSE2-NEXT:    movq %xmm3, 64(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE2-NEXT:    movq %xmm3, 72(%rdi)
+; SSE2-NEXT:    movq %xmm2, 80(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT:    movq %xmm2, 88(%rdi)
+; SSE2-NEXT:    movq %xmm1, 96(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, 104(%rdi)
+; SSE2-NEXT:    movq %xmm0, 112(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT:    movq %xmm0, 120(%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: mstore_constmask_allones_split:
 ; SSE4:       ## %bb.0:
-; SSE4-NEXT:    movss %xmm4, (%rdi)
-; SSE4-NEXT:    extractps $1, %xmm4, 4(%rdi)
-; SSE4-NEXT:    extractps $3, %xmm4, 12(%rdi)
-; SSE4-NEXT:    movd %xmm5, 16(%rdi)
-; SSE4-NEXT:    movdqa %xmm7, %xmm0
-; SSE4-NEXT:    palignr {{.*#+}} xmm0 = xmm6[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; SSE4-NEXT:    palignr {{.*#+}} xmm6 = xmm5[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; SSE4-NEXT:    movdqu %xmm6, 24(%rdi)
-; SSE4-NEXT:    movdqu %xmm0, 40(%rdi)
-; SSE4-NEXT:    pextrd $2, %xmm7, 56(%rdi)
-; SSE4-NEXT:    pextrd $3, %xmm7, 60(%rdi)
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm3
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm4
+; SSE4-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm5
+; SSE4-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm6
+; SSE4-NEXT:    movups %xmm6, (%rdi)
+; SSE4-NEXT:    palignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
+; SSE4-NEXT:    movdqu %xmm5, 24(%rdi)
+; SSE4-NEXT:    movups %xmm4, 48(%rdi)
+; SSE4-NEXT:    movups %xmm3, 64(%rdi)
+; SSE4-NEXT:    movups %xmm2, 80(%rdi)
+; SSE4-NEXT:    movups %xmm1, 96(%rdi)
+; SSE4-NEXT:    movups %xmm0, 112(%rdi)
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: mstore_constmask_allones_split:
 ; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [4294967295,4294967295,0,4294967295,4294967295,0,4294967295,4294967295]
-; AVX1-NEXT:    vmaskmovps %ymm2, %ymm0, (%rdi)
-; AVX1-NEXT:    vmovups %ymm3, 32(%rdi)
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,18446744073709551615]
+; AVX1-NEXT:    vmaskmovpd %ymm5, %ymm0, 32(%rdi)
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,0,18446744073709551615]
+; AVX1-NEXT:    vmaskmovpd %ymm4, %ymm0, (%rdi)
+; AVX1-NEXT:    vmovups %ymm7, 96(%rdi)
+; AVX1-NEXT:    vmovups %ymm6, 64(%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: mstore_constmask_allones_split:
 ; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [4294967295,4294967295,0,4294967295,4294967295,0,4294967295,4294967295]
-; AVX2-NEXT:    vpmaskmovd %ymm2, %ymm0, (%rdi)
-; AVX2-NEXT:    vmovups %ymm3, 32(%rdi)
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,18446744073709551615]
+; AVX2-NEXT:    vpmaskmovq %ymm5, %ymm0, 32(%rdi)
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,0,18446744073709551615]
+; AVX2-NEXT:    vpmaskmovq %ymm4, %ymm0, (%rdi)
+; AVX2-NEXT:    vmovups %ymm7, 96(%rdi)
+; AVX2-NEXT:    vmovups %ymm6, 64(%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: mstore_constmask_allones_split:
 ; AVX512F:       ## %bb.0:
-; AVX512F-NEXT:    movw $-37, %ax
+; AVX512F-NEXT:    movb $-37, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqu32 %zmm1, (%rdi) {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rdi) {%k1}
+; AVX512F-NEXT:    vmovups %zmm3, 64(%rdi)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VLDQ-LABEL: mstore_constmask_allones_split:
 ; AVX512VLDQ:       ## %bb.0:
-; AVX512VLDQ-NEXT:    movw $-37, %ax
+; AVX512VLDQ-NEXT:    movb $-37, %al
 ; AVX512VLDQ-NEXT:    kmovw %eax, %k1
-; AVX512VLDQ-NEXT:    vmovdqu32 %zmm1, (%rdi) {%k1}
+; AVX512VLDQ-NEXT:    vmovdqu64 %zmm2, (%rdi) {%k1}
+; AVX512VLDQ-NEXT:    vmovups %zmm3, 64(%rdi)
 ; AVX512VLDQ-NEXT:    vzeroupper
 ; AVX512VLDQ-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: mstore_constmask_allones_split:
 ; AVX512VLBW:       ## %bb.0:
-; AVX512VLBW-NEXT:    movw $-37, %ax
+; AVX512VLBW-NEXT:    movb $-37, %al
 ; AVX512VLBW-NEXT:    kmovd %eax, %k1
-; AVX512VLBW-NEXT:    vmovdqu32 %zmm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT:    vmovdqu64 %zmm2, (%rdi) {%k1}
+; AVX512VLBW-NEXT:    vmovups %zmm3, 64(%rdi)
 ; AVX512VLBW-NEXT:    vzeroupper
 ; AVX512VLBW-NEXT:    retq
 ;
 ; X86-AVX512-LABEL: mstore_constmask_allones_split:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movw $-37, %cx
+; X86-AVX512-NEXT:    movb $-37, %cl
 ; X86-AVX512-NEXT:    kmovd %ecx, %k1
-; X86-AVX512-NEXT:    vmovdqu32 %zmm1, (%eax) {%k1}
+; X86-AVX512-NEXT:    vmovdqu64 %zmm2, (%eax) {%k1}
+; X86-AVX512-NEXT:    vmovups %zmm3, 64(%eax)
 ; X86-AVX512-NEXT:    vzeroupper
 ; X86-AVX512-NEXT:    retl
-  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v16i32.p0v16i32(<16 x i32> %val, <16 x i32>* %addr, i32 4, <16 x i1><i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %mask = icmp eq <16 x i64> %trigger, zeroinitializer
+  call void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %val, <16 x i64>* %addr, i32 4, <16 x i1><i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
@@ -4892,6 +4910,87 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
   ret void
 }
 
+; Try one elt in each half of a vector that needs to split
+define void @one_mask_bit_set6(<16 x i64>* %addr, <16 x i64> %val) {
+; SSE2-LABEL: one_mask_bit_set6:
+; SSE2:       ## %bb.0:
+; SSE2-NEXT:    movlps %xmm3, 48(%rdi)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT:    movq %xmm0, 88(%rdi)
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: one_mask_bit_set6:
+; SSE4:       ## %bb.0:
+; SSE4-NEXT:    movlps %xmm3, 48(%rdi)
+; SSE4-NEXT:    pextrq $1, %xmm5, 88(%rdi)
+; SSE4-NEXT:    retq
+;
+; AVX1-LABEL: one_mask_bit_set6:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm0 = [0,0,0,18446744073709551615]
+; AVX1-NEXT:    vmaskmovpd %ymm2, %ymm0, 64(%rdi)
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm0 = [0,0,18446744073709551615,0]
+; AVX1-NEXT:    vmaskmovpd %ymm1, %ymm0, 32(%rdi)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: one_mask_bit_set6:
+; AVX2:       ## %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,0,0,18446744073709551615]
+; AVX2-NEXT:    vpmaskmovq %ymm2, %ymm0, 64(%rdi)
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,0,18446744073709551615,0]
+; AVX2-NEXT:    vpmaskmovq %ymm1, %ymm0, 32(%rdi)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: one_mask_bit_set6:
+; AVX512F:       ## %bb.0:
+; AVX512F-NEXT:    movb $8, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, 64(%rdi) {%k1}
+; AVX512F-NEXT:    movb $64, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VLDQ-LABEL: one_mask_bit_set6:
+; AVX512VLDQ:       ## %bb.0:
+; AVX512VLDQ-NEXT:    movb $8, %al
+; AVX512VLDQ-NEXT:    kmovw %eax, %k1
+; AVX512VLDQ-NEXT:    vmovdqu64 %zmm1, 64(%rdi) {%k1}
+; AVX512VLDQ-NEXT:    movb $64, %al
+; AVX512VLDQ-NEXT:    kmovw %eax, %k1
+; AVX512VLDQ-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
+; AVX512VLDQ-NEXT:    vzeroupper
+; AVX512VLDQ-NEXT:    retq
+;
+; AVX512VLBW-LABEL: one_mask_bit_set6:
+; AVX512VLBW:       ## %bb.0:
+; AVX512VLBW-NEXT:    movb $8, %al
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu64 %zmm1, 64(%rdi) {%k1}
+; AVX512VLBW-NEXT:    movb $64, %al
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; X86-AVX512-LABEL: one_mask_bit_set6:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movb $8, %cl
+; X86-AVX512-NEXT:    kmovd %ecx, %k1
+; X86-AVX512-NEXT:    vmovdqu64 %zmm1, 64(%eax) {%k1}
+; X86-AVX512-NEXT:    movb $64, %cl
+; X86-AVX512-NEXT:    kmovd %ecx, %k1
+; X86-AVX512-NEXT:    vmovdqu64 %zmm0, (%eax) {%k1}
+; X86-AVX512-NEXT:    vzeroupper
+; X86-AVX512-NEXT:    retl
+  call void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %val, <16 x i64>* %addr, i32 4, <16 x i1><i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>)
+  ret void
+}
+
 ; SimplifyDemandedBits eliminates an ashr here.
 
 define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x double>* %p, <4 x i32> %masksrc) {
@@ -4900,31 +4999,31 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x doub
 ; SSE-NEXT:    pslld $31, %xmm2
 ; SSE-NEXT:    movmskps %xmm2, %eax
 ; SSE-NEXT:    testb $1, %al
-; SSE-NEXT:    jne LBB24_1
+; SSE-NEXT:    jne LBB25_1
 ; SSE-NEXT:  ## %bb.2: ## %else
 ; SSE-NEXT:    testb $2, %al
-; SSE-NEXT:    jne LBB24_3
-; SSE-NEXT:  LBB24_4: ## %else2
+; SSE-NEXT:    jne LBB25_3
+; SSE-NEXT:  LBB25_4: ## %else2
 ; SSE-NEXT:    testb $4, %al
-; SSE-NEXT:    jne LBB24_5
-; SSE-NEXT:  LBB24_6: ## %else4
+; SSE-NEXT:    jne LBB25_5
+; SSE-NEXT:  LBB25_6: ## %else4
 ; SSE-NEXT:    testb $8, %al
-; SSE-NEXT:    jne LBB24_7
-; SSE-NEXT:  LBB24_8: ## %else6
+; SSE-NEXT:    jne LBB25_7
+; SSE-NEXT:  LBB25_8: ## %else6
 ; SSE-NEXT:    retq
-; SSE-NEXT:  LBB24_1: ## %cond.store
+; SSE-NEXT:  LBB25_1: ## %cond.store
 ; SSE-NEXT:    movlps %xmm0, (%rdi)
 ; SSE-NEXT:    testb $2, %al
-; SSE-NEXT:    je LBB24_4
-; SSE-NEXT:  LBB24_3: ## %cond.store1
+; SSE-NEXT:    je LBB25_4
+; SSE-NEXT:  LBB25_3: ## %cond.store1
 ; SSE-NEXT:    movhps %xmm0, 8(%rdi)
 ; SSE-NEXT:    testb $4, %al
-; SSE-NEXT:    je LBB24_6
-; SSE-NEXT:  LBB24_5: ## %cond.store3
+; SSE-NEXT:    je LBB25_6
+; SSE-NEXT:  LBB25_5: ## %cond.store3
 ; SSE-NEXT:    movlps %xmm1, 16(%rdi)
 ; SSE-NEXT:    testb $8, %al
-; SSE-NEXT:    je LBB24_8
-; SSE-NEXT:  LBB24_7: ## %cond.store5
+; SSE-NEXT:    je LBB25_8
+; SSE-NEXT:  LBB25_7: ## %cond.store5
 ; SSE-NEXT:    movhps %xmm1, 24(%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -4995,35 +5094,35 @@ define void @one_mask_bit_set1_variable(<4 x float>* %addr, <4 x float> %val, <4
 ; SSE2:       ## %bb.0:
 ; SSE2-NEXT:    movmskps %xmm1, %eax
 ; SSE2-NEXT:    testb $1, %al
-; SSE2-NEXT:    jne LBB25_1
+; SSE2-NEXT:    jne LBB26_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    jne LBB25_3
-; SSE2-NEXT:  LBB25_4: ## %else2
+; SSE2-NEXT:    jne LBB26_3
+; SSE2-NEXT:  LBB26_4: ## %else2
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    jne LBB25_5
-; SSE2-NEXT:  LBB25_6: ## %else4
+; SSE2-NEXT:    jne LBB26_5
+; SSE2-NEXT:  LBB26_6: ## %else4
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    jne LBB25_7
-; SSE2-NEXT:  LBB25_8: ## %else6
+; SSE2-NEXT:    jne LBB26_7
+; SSE2-NEXT:  LBB26_8: ## %else6
 ; SSE2-NEXT:    retq
-; SSE2-NEXT:  LBB25_1: ## %cond.store
+; SSE2-NEXT:  LBB26_1: ## %cond.store
 ; SSE2-NEXT:    movss %xmm0, (%rdi)
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    je LBB25_4
-; SSE2-NEXT:  LBB25_3: ## %cond.store1
+; SSE2-NEXT:    je LBB26_4
+; SSE2-NEXT:  LBB26_3: ## %cond.store1
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movss %xmm1, 4(%rdi)
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    je LBB25_6
-; SSE2-NEXT:  LBB25_5: ## %cond.store3
+; SSE2-NEXT:    je LBB26_6
+; SSE2-NEXT:  LBB26_5: ## %cond.store3
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE2-NEXT:    movss %xmm1, 8(%rdi)
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    je LBB25_8
-; SSE2-NEXT:  LBB25_7: ## %cond.store5
+; SSE2-NEXT:    je LBB26_8
+; SSE2-NEXT:  LBB26_7: ## %cond.store5
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    movss %xmm0, 12(%rdi)
 ; SSE2-NEXT:    retq
@@ -5032,31 +5131,31 @@ define void @one_mask_bit_set1_variable(<4 x float>* %addr, <4 x float> %val, <4
 ; SSE4:       ## %bb.0:
 ; SSE4-NEXT:    movmskps %xmm1, %eax
 ; SSE4-NEXT:    testb $1, %al
-; SSE4-NEXT:    jne LBB25_1
+; SSE4-NEXT:    jne LBB26_1
 ; SSE4-NEXT:  ## %bb.2: ## %else
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    jne LBB25_3
-; SSE4-NEXT:  LBB25_4: ## %else2
+; SSE4-NEXT:    jne LBB26_3
+; SSE4-NEXT:  LBB26_4: ## %else2
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    jne LBB25_5
-; SSE4-NEXT:  LBB25_6: ## %else4
+; SSE4-NEXT:    jne LBB26_5
+; SSE4-NEXT:  LBB26_6: ## %else4
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    jne LBB25_7
-; SSE4-NEXT:  LBB25_8: ## %else6
+; SSE4-NEXT:    jne LBB26_7
+; SSE4-NEXT:  LBB26_8: ## %else6
 ; SSE4-NEXT:    retq
-; SSE4-NEXT:  LBB25_1: ## %cond.store
+; SSE4-NEXT:  LBB26_1: ## %cond.store
 ; SSE4-NEXT:    movss %xmm0, (%rdi)
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    je LBB25_4
-; SSE4-NEXT:  LBB25_3: ## %cond.store1
+; SSE4-NEXT:    je LBB26_4
+; SSE4-NEXT:  LBB26_3: ## %cond.store1
 ; SSE4-NEXT:    extractps $1, %xmm0, 4(%rdi)
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    je LBB25_6
-; SSE4-NEXT:  LBB25_5: ## %cond.store3
+; SSE4-NEXT:    je LBB26_6
+; SSE4-NEXT:  LBB26_5: ## %cond.store3
 ; SSE4-NEXT:    extractps $2, %xmm0, 8(%rdi)
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    je LBB25_8
-; SSE4-NEXT:  LBB25_7: ## %cond.store5
+; SSE4-NEXT:    je LBB26_8
+; SSE4-NEXT:  LBB26_7: ## %cond.store5
 ; SSE4-NEXT:    extractps $3, %xmm0, 12(%rdi)
 ; SSE4-NEXT:    retq
 ;
@@ -5085,7 +5184,7 @@ define void @one_mask_bit_set1_variable(<4 x float>* %addr, <4 x float> %val, <4
 ; X86-AVX512-LABEL: one_mask_bit_set1_variable:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    vptestmd LCPI25_0{1to4}, %xmm1, %k1
+; X86-AVX512-NEXT:    vptestmd LCPI26_0{1to4}, %xmm1, %k1
 ; X86-AVX512-NEXT:    vmovups %xmm0, (%eax) {%k1}
 ; X86-AVX512-NEXT:    retl
   %mask_signbit = and <4 x i32> %mask, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
@@ -5108,25 +5207,25 @@ define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
 ; SSE2-NEXT:    shlb $2, %cl
 ; SSE2-NEXT:    orb %dl, %cl
 ; SSE2-NEXT:    testb $1, %cl
-; SSE2-NEXT:    jne LBB26_1
+; SSE2-NEXT:    jne LBB27_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
 ; SSE2-NEXT:    testb $2, %cl
-; SSE2-NEXT:    jne LBB26_3
-; SSE2-NEXT:  LBB26_4: ## %else2
+; SSE2-NEXT:    jne LBB27_3
+; SSE2-NEXT:  LBB27_4: ## %else2
 ; SSE2-NEXT:    testb $4, %cl
-; SSE2-NEXT:    jne LBB26_5
-; SSE2-NEXT:  LBB26_6: ## %else4
+; SSE2-NEXT:    jne LBB27_5
+; SSE2-NEXT:  LBB27_6: ## %else4
 ; SSE2-NEXT:    retq
-; SSE2-NEXT:  LBB26_1: ## %cond.store
+; SSE2-NEXT:  LBB27_1: ## %cond.store
 ; SSE2-NEXT:    movd %xmm0, (%rdi)
 ; SSE2-NEXT:    testb $2, %cl
-; SSE2-NEXT:    je LBB26_4
-; SSE2-NEXT:  LBB26_3: ## %cond.store1
+; SSE2-NEXT:    je LBB27_4
+; SSE2-NEXT:  LBB27_3: ## %cond.store1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE2-NEXT:    movd %xmm1, 4(%rdi)
 ; SSE2-NEXT:    testb $4, %cl
-; SSE2-NEXT:    je LBB26_6
-; SSE2-NEXT:  LBB26_5: ## %cond.store3
+; SSE2-NEXT:    je LBB27_6
+; SSE2-NEXT:  LBB27_5: ## %cond.store3
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE2-NEXT:    movd %xmm0, 8(%rdi)
 ; SSE2-NEXT:    retq
@@ -5141,24 +5240,24 @@ define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
 ; SSE4-NEXT:    shlb $2, %cl
 ; SSE4-NEXT:    orb %dl, %cl
 ; SSE4-NEXT:    testb $1, %cl
-; SSE4-NEXT:    jne LBB26_1
+; SSE4-NEXT:    jne LBB27_1
 ; SSE4-NEXT:  ## %bb.2: ## %else
 ; SSE4-NEXT:    testb $2, %cl
-; SSE4-NEXT:    jne LBB26_3
-; SSE4-NEXT:  LBB26_4: ## %else2
+; SSE4-NEXT:    jne LBB27_3
+; SSE4-NEXT:  LBB27_4: ## %else2
 ; SSE4-NEXT:    testb $4, %cl
-; SSE4-NEXT:    jne LBB26_5
-; SSE4-NEXT:  LBB26_6: ## %else4
+; SSE4-NEXT:    jne LBB27_5
+; SSE4-NEXT:  LBB27_6: ## %else4
 ; SSE4-NEXT:    retq
-; SSE4-NEXT:  LBB26_1: ## %cond.store
+; SSE4-NEXT:  LBB27_1: ## %cond.store
 ; SSE4-NEXT:    movss %xmm0, (%rdi)
 ; SSE4-NEXT:    testb $2, %cl
-; SSE4-NEXT:    je LBB26_4
-; SSE4-NEXT:  LBB26_3: ## %cond.store1
+; SSE4-NEXT:    je LBB27_4
+; SSE4-NEXT:  LBB27_3: ## %cond.store1
 ; SSE4-NEXT:    extractps $1, %xmm0, 4(%rdi)
 ; SSE4-NEXT:    testb $4, %cl
-; SSE4-NEXT:    je LBB26_6
-; SSE4-NEXT:  LBB26_5: ## %cond.store3
+; SSE4-NEXT:    je LBB27_6
+; SSE4-NEXT:  LBB27_5: ## %cond.store3
 ; SSE4-NEXT:    extractps $2, %xmm0, 8(%rdi)
 ; SSE4-NEXT:    retq
 ;
@@ -5299,68 +5398,68 @@ define void @PR11210(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <2 x i64
 ; SSE2:       ## %bb.0:
 ; SSE2-NEXT:    movmskps %xmm2, %eax
 ; SSE2-NEXT:    testb $1, %al
-; SSE2-NEXT:    jne LBB28_1
+; SSE2-NEXT:    jne LBB29_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    jne LBB28_3
-; SSE2-NEXT:  LBB28_4: ## %else2
+; SSE2-NEXT:    jne LBB29_3
+; SSE2-NEXT:  LBB29_4: ## %else2
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    jne LBB28_5
-; SSE2-NEXT:  LBB28_6: ## %else4
+; SSE2-NEXT:    jne LBB29_5
+; SSE2-NEXT:  LBB29_6: ## %else4
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    jne LBB28_7
-; SSE2-NEXT:  LBB28_8: ## %else6
+; SSE2-NEXT:    jne LBB29_7
+; SSE2-NEXT:  LBB29_8: ## %else6
 ; SSE2-NEXT:    testb $1, %al
-; SSE2-NEXT:    jne LBB28_9
-; SSE2-NEXT:  LBB28_10: ## %else9
+; SSE2-NEXT:    jne LBB29_9
+; SSE2-NEXT:  LBB29_10: ## %else9
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    jne LBB28_11
-; SSE2-NEXT:  LBB28_12: ## %else11
+; SSE2-NEXT:    jne LBB29_11
+; SSE2-NEXT:  LBB29_12: ## %else11
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    jne LBB28_13
-; SSE2-NEXT:  LBB28_14: ## %else13
+; SSE2-NEXT:    jne LBB29_13
+; SSE2-NEXT:  LBB29_14: ## %else13
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    jne LBB28_15
-; SSE2-NEXT:  LBB28_16: ## %else15
+; SSE2-NEXT:    jne LBB29_15
+; SSE2-NEXT:  LBB29_16: ## %else15
 ; SSE2-NEXT:    retq
-; SSE2-NEXT:  LBB28_1: ## %cond.store
+; SSE2-NEXT:  LBB29_1: ## %cond.store
 ; SSE2-NEXT:    movss %xmm0, (%rdi)
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    je LBB28_4
-; SSE2-NEXT:  LBB28_3: ## %cond.store1
+; SSE2-NEXT:    je LBB29_4
+; SSE2-NEXT:  LBB29_3: ## %cond.store1
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movss %xmm2, 4(%rdi)
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    je LBB28_6
-; SSE2-NEXT:  LBB28_5: ## %cond.store3
+; SSE2-NEXT:    je LBB29_6
+; SSE2-NEXT:  LBB29_5: ## %cond.store3
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
 ; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
 ; SSE2-NEXT:    movss %xmm2, 8(%rdi)
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    je LBB28_8
-; SSE2-NEXT:  LBB28_7: ## %cond.store5
+; SSE2-NEXT:    je LBB29_8
+; SSE2-NEXT:  LBB29_7: ## %cond.store5
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    movss %xmm0, 12(%rdi)
 ; SSE2-NEXT:    testb $1, %al
-; SSE2-NEXT:    je LBB28_10
-; SSE2-NEXT:  LBB28_9: ## %cond.store8
+; SSE2-NEXT:    je LBB29_10
+; SSE2-NEXT:  LBB29_9: ## %cond.store8
 ; SSE2-NEXT:    movss %xmm1, (%rdi)
 ; SSE2-NEXT:    testb $2, %al
-; SSE2-NEXT:    je LBB28_12
-; SSE2-NEXT:  LBB28_11: ## %cond.store10
+; SSE2-NEXT:    je LBB29_12
+; SSE2-NEXT:  LBB29_11: ## %cond.store10
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
 ; SSE2-NEXT:    movss %xmm0, 4(%rdi)
 ; SSE2-NEXT:    testb $4, %al
-; SSE2-NEXT:    je LBB28_14
-; SSE2-NEXT:  LBB28_13: ## %cond.store12
+; SSE2-NEXT:    je LBB29_14
+; SSE2-NEXT:  LBB29_13: ## %cond.store12
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movss %xmm0, 8(%rdi)
 ; SSE2-NEXT:    testb $8, %al
-; SSE2-NEXT:    je LBB28_16
-; SSE2-NEXT:  LBB28_15: ## %cond.store14
+; SSE2-NEXT:    je LBB29_16
+; SSE2-NEXT:  LBB29_15: ## %cond.store14
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
 ; SSE2-NEXT:    movss %xmm1, 12(%rdi)
 ; SSE2-NEXT:    retq
@@ -5369,59 +5468,59 @@ define void @PR11210(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <2 x i64
 ; SSE4:       ## %bb.0:
 ; SSE4-NEXT:    movmskps %xmm2, %eax
 ; SSE4-NEXT:    testb $1, %al
-; SSE4-NEXT:    jne LBB28_1
+; SSE4-NEXT:    jne LBB29_1
 ; SSE4-NEXT:  ## %bb.2: ## %else
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    jne LBB28_3
-; SSE4-NEXT:  LBB28_4: ## %else2
+; SSE4-NEXT:    jne LBB29_3
+; SSE4-NEXT:  LBB29_4: ## %else2
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    jne LBB28_5
-; SSE4-NEXT:  LBB28_6: ## %else4
+; SSE4-NEXT:    jne LBB29_5
+; SSE4-NEXT:  LBB29_6: ## %else4
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    jne LBB28_7
-; SSE4-NEXT:  LBB28_8: ## %else6
+; SSE4-NEXT:    jne LBB29_7
+; SSE4-NEXT:  LBB29_8: ## %else6
 ; SSE4-NEXT:    testb $1, %al
-; SSE4-NEXT:    jne LBB28_9
-; SSE4-NEXT:  LBB28_10: ## %else9
+; SSE4-NEXT:    jne LBB29_9
+; SSE4-NEXT:  LBB29_10: ## %else9
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    jne LBB28_11
-; SSE4-NEXT:  LBB28_12: ## %else11
+; SSE4-NEXT:    jne LBB29_11
+; SSE4-NEXT:  LBB29_12: ## %else11
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    jne LBB28_13
-; SSE4-NEXT:  LBB28_14: ## %else13
+; SSE4-NEXT:    jne LBB29_13
+; SSE4-NEXT:  LBB29_14: ## %else13
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    jne LBB28_15
-; SSE4-NEXT:  LBB28_16: ## %else15
+; SSE4-NEXT:    jne LBB29_15
+; SSE4-NEXT:  LBB29_16: ## %else15
 ; SSE4-NEXT:    retq
-; SSE4-NEXT:  LBB28_1: ## %cond.store
+; SSE4-NEXT:  LBB29_1: ## %cond.store
 ; SSE4-NEXT:    movss %xmm0, (%rdi)
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    je LBB28_4
-; SSE4-NEXT:  LBB28_3: ## %cond.store1
+; SSE4-NEXT:    je LBB29_4
+; SSE4-NEXT:  LBB29_3: ## %cond.store1
 ; SSE4-NEXT:    extractps $1, %xmm0, 4(%rdi)
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    je LBB28_6
-; SSE4-NEXT:  LBB28_5: ## %cond.store3
+; SSE4-NEXT:    je LBB29_6
+; SSE4-NEXT:  LBB29_5: ## %cond.store3
 ; SSE4-NEXT:    extractps $2, %xmm0, 8(%rdi)
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    je LBB28_8
-; SSE4-NEXT:  LBB28_7: ## %cond.store5
+; SSE4-NEXT:    je LBB29_8
+; SSE4-NEXT:  LBB29_7: ## %cond.store5
 ; SSE4-NEXT:    extractps $3, %xmm0, 12(%rdi)
 ; SSE4-NEXT:    testb $1, %al
-; SSE4-NEXT:    je LBB28_10
-; SSE4-NEXT:  LBB28_9: ## %cond.store8
+; SSE4-NEXT:    je LBB29_10
+; SSE4-NEXT:  LBB29_9: ## %cond.store8
 ; SSE4-NEXT:    movss %xmm1, (%rdi)
 ; SSE4-NEXT:    testb $2, %al
-; SSE4-NEXT:    je LBB28_12
-; SSE4-NEXT:  LBB28_11: ## %cond.store10
+; SSE4-NEXT:    je LBB29_12
+; SSE4-NEXT:  LBB29_11: ## %cond.store10
 ; SSE4-NEXT:    extractps $1, %xmm1, 4(%rdi)
 ; SSE4-NEXT:    testb $4, %al
-; SSE4-NEXT:    je LBB28_14
-; SSE4-NEXT:  LBB28_13: ## %cond.store12
+; SSE4-NEXT:    je LBB29_14
+; SSE4-NEXT:  LBB29_13: ## %cond.store12
 ; SSE4-NEXT:    extractps $2, %xmm1, 8(%rdi)
 ; SSE4-NEXT:    testb $8, %al
-; SSE4-NEXT:    je LBB28_16
-; SSE4-NEXT:  LBB28_15: ## %cond.store14
+; SSE4-NEXT:    je LBB29_16
+; SSE4-NEXT:  LBB29_15: ## %cond.store14
 ; SSE4-NEXT:    extractps $3, %xmm1, 12(%rdi)
 ; SSE4-NEXT:    retq
 ;
@@ -5484,6 +5583,7 @@ declare void @llvm.masked.store.v8f32.p0v8f32(<8 x float>, <8 x float>*, i32, <8
 declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
 declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
 
+declare void @llvm.masked.store.v16i64.p0v16i64(<16 x i64>, <16 x i64>*, i32, <16 x i1>)
 declare void @llvm.masked.store.v8i64.p0v8i64(<8 x i64>, <8 x i64>*, i32, <8 x i1>)
 declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)
 declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
