[llvm] 95ff3b5 - [X86] vector-compress.ll - regenerate with missing AVX2 test coverage

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jan 16 06:17:58 PST 2025


Author: Simon Pilgrim
Date: 2025-01-16T14:17:33Z
New Revision: 95ff3b51672e970e1b69ca438a97d733cdd82566

URL: https://github.com/llvm/llvm-project/commit/95ff3b51672e970e1b69ca438a97d733cdd82566
DIFF: https://github.com/llvm/llvm-project/commit/95ff3b51672e970e1b69ca438a97d733cdd82566.diff

LOG: [X86] vector-compress.ll - regenerate with missing AVX2 test coverage

Shows some really poor codegen for the maskbit extraction that we should address.
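
For reference, every function in this test file wraps the same generic intrinsic; a
minimal sketch of the pattern being exercised (the @example name and the v4i32
type here are illustrative, not taken from the file):

    define <4 x i32> @example(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) nounwind {
        ; Lanes of %vec whose mask bit is set are packed into the low lanes
        ; of the result; the remaining lanes are taken from %passthru.
        %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru)
        ret <4 x i32> %out
    }

As the new checks show, the AVX2 lowering (which, unlike the AVX512 paths, has no
VPCOMPRESS* instructions to rely on) spills to the stack and extracts each mask
bit individually with vpextrb/vpextrw/vpextrd to compute the per-element store
offsets, which is what produces the long sequences below.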

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-compress.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index 8185218d6236a3..87a948a4f1f7ee 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -513,6 +513,57 @@ define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64>
 }
 
 define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v4f64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovsxdq %xmm1, %ymm3
+; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
+; AVX2-NEXT:    vpsrlq $63, %ymm3, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX2-NEXT:    vmovq %xmm1, %rcx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    andl $3, %ecx
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovlpd %xmm0, (%rsp)
+; AVX2-NEXT:    vmovq %xmm3, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    vmovhpd %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT:    subq %rcx, %rax
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT:    vmovq %xmm2, %rcx
+; AVX2-NEXT:    subq %rcx, %rax
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    andl $3, %ecx
+; AVX2-NEXT:    vmovhpd %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT:    subq %rcx, %rax
+; AVX2-NEXT:    cmpq $4, %rax
+; AVX2-NEXT:    jb .LBB7_2
+; AVX2-NEXT:  # %bb.1:
+; AVX2-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT:  .LBB7_2:
+; AVX2-NEXT:    cmpq $3, %rax
+; AVX2-NEXT:    movl $3, %ecx
+; AVX2-NEXT:    cmovbq %rax, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vmovsd %xmm1, (%rsp,%rax,8)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v4f64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
@@ -537,6 +588,140 @@ define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x
 }
 
 define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v16i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r13
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $128, %rsp
+; AVX2-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm3, (%rsp)
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
+; AVX2-NEXT:    vpaddd %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrd $1, %xmm3, %eax
+; AVX2-NEXT:    vmovd %xmm3, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm3, %eax
+; AVX2-NEXT:    vpextrd $3, %xmm3, %edx
+; AVX2-NEXT:    addl %eax, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vmovd %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $2, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $3, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $4, %xmm2, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rax, %r8
+; AVX2-NEXT:    vpextrb $5, %xmm2, %r9d
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addq %r8, %r9
+; AVX2-NEXT:    vpextrb $6, %xmm2, %r10d
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addq %r9, %r10
+; AVX2-NEXT:    vpextrb $7, %xmm2, %r11d
+; AVX2-NEXT:    andl $1, %r11d
+; AVX2-NEXT:    addq %r10, %r11
+; AVX2-NEXT:    vpextrb $8, %xmm2, %ebx
+; AVX2-NEXT:    andl $1, %ebx
+; AVX2-NEXT:    addq %r11, %rbx
+; AVX2-NEXT:    vpextrb $9, %xmm2, %r14d
+; AVX2-NEXT:    andl $1, %r14d
+; AVX2-NEXT:    addq %rbx, %r14
+; AVX2-NEXT:    vpextrb $10, %xmm2, %r15d
+; AVX2-NEXT:    andl $1, %r15d
+; AVX2-NEXT:    addq %r14, %r15
+; AVX2-NEXT:    vpextrb $11, %xmm2, %r12d
+; AVX2-NEXT:    andl $1, %r12d
+; AVX2-NEXT:    addq %r15, %r12
+; AVX2-NEXT:    vpextrb $12, %xmm2, %r13d
+; AVX2-NEXT:    andl $1, %r13d
+; AVX2-NEXT:    addq %r12, %r13
+; AVX2-NEXT:    vpextrb $13, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %r13, %rcx
+; AVX2-NEXT:    vpextrb $14, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vpextrb $15, %xmm2, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rax, %rdx
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    cmpq $16, %rdx
+; AVX2-NEXT:    vextractps $3, %xmm2, %esi
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX2-NEXT:    cmovbl (%rsp,%rdi,4), %esi
+; AVX2-NEXT:    movl %esi, %edi
+; AVX2-NEXT:    vmovss %xmm0, (%rsp)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rsi,4)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rsi,4)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rsi,4)
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rsi,4)
+; AVX2-NEXT:    andl $15, %r8d
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%r8,4)
+; AVX2-NEXT:    andl $15, %r9d
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%r9,4)
+; AVX2-NEXT:    andl $15, %r10d
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%r10,4)
+; AVX2-NEXT:    andl $15, %r11d
+; AVX2-NEXT:    vmovss %xmm1, (%rsp,%r11,4)
+; AVX2-NEXT:    andl $15, %ebx
+; AVX2-NEXT:    vextractps $1, %xmm1, (%rsp,%rbx,4)
+; AVX2-NEXT:    andl $15, %r14d
+; AVX2-NEXT:    vextractps $2, %xmm1, (%rsp,%r14,4)
+; AVX2-NEXT:    andl $15, %r15d
+; AVX2-NEXT:    vextractps $3, %xmm1, (%rsp,%r15,4)
+; AVX2-NEXT:    andl $15, %r12d
+; AVX2-NEXT:    vmovss %xmm2, (%rsp,%r12,4)
+; AVX2-NEXT:    andl $15, %r13d
+; AVX2-NEXT:    vextractps $1, %xmm2, (%rsp,%r13,4)
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm2, (%rsp,%rcx,4)
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $3, %xmm2, (%rsp,%rax,4)
+; AVX2-NEXT:    cmpq $15, %rdx
+; AVX2-NEXT:    movl $15, %eax
+; AVX2-NEXT:    cmovbq %rdx, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movl %edi, (%rsp,%rax,4)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    leaq -40(%rbp), %rsp
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r13
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v16i32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
@@ -558,6 +743,134 @@ define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x
 }
 
 define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v16f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $96, %rsp
+; AVX2-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm3, (%rsp)
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
+; AVX2-NEXT:    vpaddd %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrd $1, %xmm3, %eax
+; AVX2-NEXT:    vmovd %xmm3, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm3, %eax
+; AVX2-NEXT:    vpextrd $3, %xmm3, %edx
+; AVX2-NEXT:    addl %eax, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovss %xmm0, (%rsp)
+; AVX2-NEXT:    vmovd %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $1, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $2, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $3, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $4, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vpextrb $5, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $6, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $7, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $8, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vmovss %xmm1, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $9, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $1, %xmm1, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $10, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm1, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $11, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $3, %xmm1, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $12, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $13, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vpextrb $14, %xmm2, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    vpextrb $15, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rdx, %rax
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    cmpq $16, %rax
+; AVX2-NEXT:    jae .LBB9_2
+; AVX2-NEXT:  # %bb.1:
+; AVX2-NEXT:    vmovaps %xmm3, %xmm0
+; AVX2-NEXT:  .LBB9_2:
+; AVX2-NEXT:    cmpq $15, %rax
+; AVX2-NEXT:    movl $15, %ecx
+; AVX2-NEXT:    cmovbq %rax, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rax,4)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v16f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
@@ -579,6 +892,78 @@ define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <1
 }
 
 define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v8i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $96, %rsp
+; AVX2-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm3, (%rsp)
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT:    vpaddq %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT:    vpaddq %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT:    vmovq %xmm3, %rax
+; AVX2-NEXT:    addl %ecx, %eax
+; AVX2-NEXT:    andl $7, %eax
+; AVX2-NEXT:    vpextrw $1, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    vmovd %xmm2, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    vpextrw $2, %xmm2, %esi
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    addq %rcx, %rsi
+; AVX2-NEXT:    vpextrw $3, %xmm2, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    addq %rsi, %rdi
+; AVX2-NEXT:    vpextrw $4, %xmm2, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rdi, %r8
+; AVX2-NEXT:    vpextrw $5, %xmm2, %r9d
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addq %r8, %r9
+; AVX2-NEXT:    vpextrw $6, %xmm2, %r10d
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addq %r9, %r10
+; AVX2-NEXT:    vpextrw $7, %xmm2, %r11d
+; AVX2-NEXT:    andl $1, %r11d
+; AVX2-NEXT:    addq %r10, %r11
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpextrq $1, %xmm2, %rbx
+; AVX2-NEXT:    cmpq $8, %r11
+; AVX2-NEXT:    cmovbq (%rsp,%rax,8), %rbx
+; AVX2-NEXT:    vmovq %xmm0, (%rsp)
+; AVX2-NEXT:    vpextrq $1, %xmm0, (%rsp,%rdx,8)
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrq $1, %xmm0, (%rsp,%rsi,8)
+; AVX2-NEXT:    andl $7, %edi
+; AVX2-NEXT:    vmovq %xmm1, (%rsp,%rdi,8)
+; AVX2-NEXT:    andl $7, %r8d
+; AVX2-NEXT:    vpextrq $1, %xmm1, (%rsp,%r8,8)
+; AVX2-NEXT:    andl $7, %r9d
+; AVX2-NEXT:    vmovq %xmm2, (%rsp,%r9,8)
+; AVX2-NEXT:    andl $7, %r10d
+; AVX2-NEXT:    vpextrq $1, %xmm2, (%rsp,%r10,8)
+; AVX2-NEXT:    cmpq $7, %r11
+; AVX2-NEXT:    movl $7, %eax
+; AVX2-NEXT:    cmovbq %r11, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movq %rbx, (%rsp,%rax,8)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    leaq -8(%rbp), %rsp
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v8i64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwq %xmm1, %zmm1
@@ -600,6 +985,83 @@ define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64>
 }
 
 define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v8f64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $96, %rsp
+; AVX2-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm3, (%rsp)
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT:    vpaddq %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT:    vpaddq %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrq $1, %xmm3, %rax
+; AVX2-NEXT:    vmovq %xmm3, %rcx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    andl $7, %ecx
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX2-NEXT:    vmovlps %xmm0, (%rsp)
+; AVX2-NEXT:    vmovd %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vmovhps %xmm0, (%rsp,%rax,8)
+; AVX2-NEXT:    vpextrw $1, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovlps %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrw $2, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vmovhps %xmm0, (%rsp,%rax,8)
+; AVX2-NEXT:    vpextrw $3, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vpextrw $4, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $7, %ecx
+; AVX2-NEXT:    vmovlpd %xmm1, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrw $5, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $7, %eax
+; AVX2-NEXT:    vmovhpd %xmm1, (%rsp,%rax,8)
+; AVX2-NEXT:    vpextrw $6, %xmm2, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $7, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrw $7, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rdx, %rax
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $7, %edx
+; AVX2-NEXT:    vmovhpd %xmm0, (%rsp,%rdx,8)
+; AVX2-NEXT:    cmpq $8, %rax
+; AVX2-NEXT:    jb .LBB11_2
+; AVX2-NEXT:  # %bb.1:
+; AVX2-NEXT:    vshufpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT:  .LBB11_2:
+; AVX2-NEXT:    cmpq $7, %rax
+; AVX2-NEXT:    movl $7, %ecx
+; AVX2-NEXT:    cmovbq %rax, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vmovsd %xmm3, (%rsp,%rax,8)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v8f64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwq %xmm1, %zmm1
@@ -621,41 +1083,255 @@ define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x
 }
 
 define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) nounwind {
-; AVX512F-LABEL: test_compress_v16i8:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vpslld $31, %zmm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k1
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512F-NEXT:    vpcompressd %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vpmovdb %zmm1, %xmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: test_compress_v16i8:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsllw $7, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpmovb2m %xmm1, %k1
-; AVX512VL-NEXT:    vpcompressb %xmm0, %xmm2 {%k1}
-; AVX512VL-NEXT:    vmovdqa %xmm2, %xmm0
-; AVX512VL-NEXT:    retq
-    %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru)
-    ret <16 x i8> %out
-}
-
-define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) nounwind {
-; AVX512F-LABEL: test_compress_v8i16:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpmovsxwq %xmm1, %zmm1
-; AVX512F-NEXT:    vpsllq $63, %zmm1, %zmm1
-; AVX512F-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; AVX512F-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512F-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX512F-NEXT:    vpcompressq %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vpmovqw %zmm1, %xmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
+; AVX2-LABEL: test_compress_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r13
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm1, %r11d
+; AVX2-NEXT:    vmovd %xmm1, %eax
+; AVX2-NEXT:    movzbl %al, %edx
+; AVX2-NEXT:    # kill: def $al killed $al killed $eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    subb %r11b, %al
+; AVX2-NEXT:    vpextrb $2, %xmm1, %esi
+; AVX2-NEXT:    subb %sil, %al
+; AVX2-NEXT:    vpextrb $3, %xmm1, %r13d
+; AVX2-NEXT:    subb %r13b, %al
+; AVX2-NEXT:    vpextrb $4, %xmm1, %r12d
+; AVX2-NEXT:    subb %r12b, %al
+; AVX2-NEXT:    vpextrb $5, %xmm1, %r15d
+; AVX2-NEXT:    subb %r15b, %al
+; AVX2-NEXT:    vpextrb $6, %xmm1, %r14d
+; AVX2-NEXT:    subb %r14b, %al
+; AVX2-NEXT:    vpextrb $7, %xmm1, %ebp
+; AVX2-NEXT:    subb %bpl, %al
+; AVX2-NEXT:    vpextrb $8, %xmm1, %ebx
+; AVX2-NEXT:    subb %bl, %al
+; AVX2-NEXT:    vpextrb $9, %xmm1, %r10d
+; AVX2-NEXT:    subb %r10b, %al
+; AVX2-NEXT:    vpextrb $10, %xmm1, %r9d
+; AVX2-NEXT:    subb %r9b, %al
+; AVX2-NEXT:    vpextrb $11, %xmm1, %r8d
+; AVX2-NEXT:    subb %r8b, %al
+; AVX2-NEXT:    vpextrb $12, %xmm1, %edi
+; AVX2-NEXT:    subb %dil, %al
+; AVX2-NEXT:    vpextrb $13, %xmm1, %ecx
+; AVX2-NEXT:    movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    subb %cl, %al
+; AVX2-NEXT:    vpextrb $14, %xmm1, %ecx
+; AVX2-NEXT:    movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    subb %cl, %al
+; AVX2-NEXT:    vpextrb $15, %xmm1, %ecx
+; AVX2-NEXT:    movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    subb %cl, %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    movzbl -40(%rsp,%rax), %eax
+; AVX2-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    vpextrb $1, %xmm0, -40(%rsp,%rdx)
+; AVX2-NEXT:    movzbl %r11b, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rdx, %rax
+; AVX2-NEXT:    vpextrb $2, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl %sil, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vpextrb $3, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl %r13b, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vpextrb $4, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl %r12b, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movzbl %r15b, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $5, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl %r14b, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $6, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl %bpl, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $7, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl %bl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $8, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl %r10b, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $9, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl %r9b, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $10, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl %r8b, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $11, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl %dil, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $13, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $14, %xmm0, -40(%rsp,%rax)
+; AVX2-NEXT:    movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $15, %xmm0, -40(%rsp,%rcx)
+; AVX2-NEXT:    cmpq $15, %rax
+; AVX2-NEXT:    movl $15, %ecx
+; AVX2-NEXT:    cmovbq %rax, %rcx
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    cmovbel {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
+; AVX2-NEXT:    movb %al, -40(%rsp,%rcx)
+; AVX2-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r13
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_compress_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT:    vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpcompressd %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_compress_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovb2m %xmm1, %k1
+; AVX512VL-NEXT:    vpcompressb %xmm0, %xmm2 {%k1}
+; AVX512VL-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512VL-NEXT:    retq
+    %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru)
+    ret <16 x i8> %out
+}
+
+define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    vpsllw $15, %xmm1, %xmm1
+; AVX2-NEXT:    vpsraw $15, %xmm1, %xmm1
+; AVX2-NEXT:    vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vmovd %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    leal (%rcx,%rax), %esi
+; AVX2-NEXT:    vpextrw $2, %xmm1, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    vpextrw $3, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    leal (%rdi,%rdx), %r10d
+; AVX2-NEXT:    addl %esi, %r10d
+; AVX2-NEXT:    vpextrw $4, %xmm1, %r9d
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    vpextrw $5, %xmm1, %esi
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    leal (%r9,%rsi), %r11d
+; AVX2-NEXT:    vpextrw $6, %xmm1, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addl %r8d, %r11d
+; AVX2-NEXT:    addl %r10d, %r11d
+; AVX2-NEXT:    vpextrw $7, %xmm1, %r10d
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addl %r10d, %r11d
+; AVX2-NEXT:    andl $7, %r11d
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    addq %rax, %rdi
+; AVX2-NEXT:    addq %rdi, %rdx
+; AVX2-NEXT:    addq %rdx, %r9
+; AVX2-NEXT:    addq %r9, %rsi
+; AVX2-NEXT:    addq %rsi, %r8
+; AVX2-NEXT:    addq %r8, %r10
+; AVX2-NEXT:    vpextrw $7, %xmm0, %ebx
+; AVX2-NEXT:    cmpq $8, %r10
+; AVX2-NEXT:    cmovbw -16(%rsp,%r11,2), %bx
+; AVX2-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm0, -16(%rsp,%rcx,2)
+; AVX2-NEXT:    vpextrw $2, %xmm0, -16(%rsp,%rax,2)
+; AVX2-NEXT:    vpextrw $3, %xmm0, -16(%rsp,%rdi,2)
+; AVX2-NEXT:    andl $7, %edx
+; AVX2-NEXT:    vpextrw $4, %xmm0, -16(%rsp,%rdx,2)
+; AVX2-NEXT:    andl $7, %r9d
+; AVX2-NEXT:    vpextrw $5, %xmm0, -16(%rsp,%r9,2)
+; AVX2-NEXT:    andl $7, %esi
+; AVX2-NEXT:    vpextrw $6, %xmm0, -16(%rsp,%rsi,2)
+; AVX2-NEXT:    andl $7, %r8d
+; AVX2-NEXT:    vpextrw $7, %xmm0, -16(%rsp,%r8,2)
+; AVX2-NEXT:    cmpq $7, %r10
+; AVX2-NEXT:    movl $7, %eax
+; AVX2-NEXT:    cmovbq %r10, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movw %bx, -16(%rsp,%rax,2)
+; AVX2-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_compress_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovsxwq %xmm1, %zmm1
+; AVX512F-NEXT:    vpsllq $63, %zmm1, %zmm1
+; AVX512F-NEXT:    vptestmq %zmm1, %zmm1, %k1
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
+; AVX512F-NEXT:    vpcompressq %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vpmovqw %zmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: test_compress_v8i16:
 ; AVX512VL:       # %bb.0:
@@ -669,6 +1345,247 @@ define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16>
 }
 
 define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm3
+; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm1
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm4
+; AVX2-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vpaddb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX2-NEXT:    vmovd %xmm2, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $2, %xmm2, %eax
+; AVX2-NEXT:    vpextrb $3, %xmm2, %edx
+; AVX2-NEXT:    addb %al, %dl
+; AVX2-NEXT:    addb %cl, %dl
+; AVX2-NEXT:    vpextrb $4, %xmm2, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm2, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $6, %xmm2, %eax
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    addb %dl, %al
+; AVX2-NEXT:    vpextrb $7, %xmm2, %ecx
+; AVX2-NEXT:    vpextrb $8, %xmm2, %edx
+; AVX2-NEXT:    addb %cl, %dl
+; AVX2-NEXT:    vpextrb $9, %xmm2, %ecx
+; AVX2-NEXT:    addb %dl, %cl
+; AVX2-NEXT:    vpextrb $10, %xmm2, %edx
+; AVX2-NEXT:    addb %cl, %dl
+; AVX2-NEXT:    addb %al, %dl
+; AVX2-NEXT:    vpextrb $11, %xmm2, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm2, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    vpextrb $14, %xmm2, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $15, %xmm2, %eax
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    addb %dl, %al
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $31, %eax
+; AVX2-NEXT:    movzbl (%rsp,%rax), %eax
+; AVX2-NEXT:    vpextrb $0, %xmm0, (%rsp)
+; AVX2-NEXT:    vmovd %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    vpextrb $1, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $1, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    vpextrb $2, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $2, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    vpextrb $3, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $3, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    vpextrb $4, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $4, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    vpextrb $5, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $5, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    vpextrb $6, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $6, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $7, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $7, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $8, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $8, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $9, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $9, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $10, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $10, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $11, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $11, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $12, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $12, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $13, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $13, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $14, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $14, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $15, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $15, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vmovd %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vpextrb $0, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $1, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $1, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $2, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $2, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $3, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $3, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $4, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $4, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $5, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $5, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $6, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $6, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $7, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $7, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $8, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $8, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $9, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $9, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $10, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $10, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $11, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $11, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $12, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $12, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $13, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $13, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $14, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrb $14, %xmm0, (%rsp,%rdx)
+; AVX2-NEXT:    vpextrb $15, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrb $15, %xmm0, (%rsp,%rcx)
+; AVX2-NEXT:    cmpq $31, %rdx
+; AVX2-NEXT:    movl $31, %ecx
+; AVX2-NEXT:    cmovbq %rdx, %rcx
+; AVX2-NEXT:    vpextrb $15, %xmm0, %edx
+; AVX2-NEXT:    cmovbel %eax, %edx
+; AVX2-NEXT:    movb %dl, (%rsp,%rcx)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v32i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
@@ -717,6 +1634,146 @@ define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8>
 }
 
 define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r13
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $96, %rsp
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpsllw $15, %ymm1, %ymm3
+; AVX2-NEXT:    vpsraw $15, %ymm3, %ymm1
+; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
+; AVX2-NEXT:    vpsrlw $15, %ymm3, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrw $1, %xmm2, %eax
+; AVX2-NEXT:    vmovd %xmm2, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrw $2, %xmm2, %eax
+; AVX2-NEXT:    vpextrw $3, %xmm2, %edx
+; AVX2-NEXT:    addl %eax, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    vpextrw $4, %xmm2, %eax
+; AVX2-NEXT:    vpextrw $5, %xmm2, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrw $6, %xmm2, %eax
+; AVX2-NEXT:    addl %ecx, %eax
+; AVX2-NEXT:    addl %edx, %eax
+; AVX2-NEXT:    vpextrw $7, %xmm2, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrw $1, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vmovd %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrw $2, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrw $3, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrw $4, %xmm1, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rax, %r8
+; AVX2-NEXT:    vpextrw $5, %xmm1, %r9d
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addq %r8, %r9
+; AVX2-NEXT:    vpextrw $6, %xmm1, %r10d
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addq %r9, %r10
+; AVX2-NEXT:    vpextrw $7, %xmm1, %r11d
+; AVX2-NEXT:    andl $1, %r11d
+; AVX2-NEXT:    addq %r10, %r11
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT:    vmovd %xmm1, %ebx
+; AVX2-NEXT:    andl $1, %ebx
+; AVX2-NEXT:    addq %r11, %rbx
+; AVX2-NEXT:    vpextrw $1, %xmm1, %r14d
+; AVX2-NEXT:    andl $1, %r14d
+; AVX2-NEXT:    addq %rbx, %r14
+; AVX2-NEXT:    vpextrw $2, %xmm1, %r15d
+; AVX2-NEXT:    andl $1, %r15d
+; AVX2-NEXT:    addq %r14, %r15
+; AVX2-NEXT:    vpextrw $3, %xmm1, %r12d
+; AVX2-NEXT:    andl $1, %r12d
+; AVX2-NEXT:    addq %r15, %r12
+; AVX2-NEXT:    vpextrw $4, %xmm1, %r13d
+; AVX2-NEXT:    andl $1, %r13d
+; AVX2-NEXT:    addq %r12, %r13
+; AVX2-NEXT:    vpextrw $5, %xmm1, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %r13, %rdx
+; AVX2-NEXT:    vpextrw $6, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    vpextrw $7, %xmm1, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    addq %rcx, %rdi
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    cmpq $16, %rdi
+; AVX2-NEXT:    vpextrw $7, %xmm1, %eax
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    cmovbw (%rsp,%rsi,2), %ax
+; AVX2-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rsp)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vpextrw $1, %xmm0, (%rsp,%rsi,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vpextrw $2, %xmm0, (%rsp,%rsi,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT:    vpextrw $3, %xmm0, (%rsp,%rsi,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT:    vpextrw $4, %xmm0, (%rsp,%rax,2)
+; AVX2-NEXT:    andl $15, %r8d
+; AVX2-NEXT:    vpextrw $5, %xmm0, (%rsp,%r8,2)
+; AVX2-NEXT:    andl $15, %r9d
+; AVX2-NEXT:    vpextrw $6, %xmm0, (%rsp,%r9,2)
+; AVX2-NEXT:    andl $15, %r10d
+; AVX2-NEXT:    vpextrw $7, %xmm0, (%rsp,%r10,2)
+; AVX2-NEXT:    andl $15, %r11d
+; AVX2-NEXT:    vpextrw $0, %xmm1, (%rsp,%r11,2)
+; AVX2-NEXT:    andl $15, %ebx
+; AVX2-NEXT:    vpextrw $1, %xmm1, (%rsp,%rbx,2)
+; AVX2-NEXT:    andl $15, %r14d
+; AVX2-NEXT:    vpextrw $2, %xmm1, (%rsp,%r14,2)
+; AVX2-NEXT:    andl $15, %r15d
+; AVX2-NEXT:    vpextrw $3, %xmm1, (%rsp,%r15,2)
+; AVX2-NEXT:    andl $15, %r12d
+; AVX2-NEXT:    vpextrw $4, %xmm1, (%rsp,%r12,2)
+; AVX2-NEXT:    andl $15, %r13d
+; AVX2-NEXT:    vpextrw $5, %xmm1, (%rsp,%r13,2)
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    vpextrw $6, %xmm1, (%rsp,%rdx,2)
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrw $7, %xmm1, (%rsp,%rcx,2)
+; AVX2-NEXT:    cmpq $15, %rdi
+; AVX2-NEXT:    movl $15, %eax
+; AVX2-NEXT:    cmovbq %rdi, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload
+; AVX2-NEXT:    movw %cx, (%rsp,%rax,2)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    leaq -40(%rbp), %rsp
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r13
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_v16i16:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
@@ -728,86 +1785,2537 @@ define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x
 ; AVX512F-NEXT:    vpmovdw %zmm1, %ymm0
 ; AVX512F-NEXT:    retq
 ;
-; AVX512VL-LABEL: test_compress_v16i16:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsllw $7, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpmovb2m %xmm1, %k1
-; AVX512VL-NEXT:    vpcompressw %ymm0, %ymm2 {%k1}
-; AVX512VL-NEXT:    vmovdqa %ymm2, %ymm0
-; AVX512VL-NEXT:    retq
-    %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru)
-    ret <16 x i16> %out
-}
-
-define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) nounwind {
-; AVX512VL-LABEL: test_compress_v64i8:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsllw $7, %zmm1, %zmm1
-; AVX512VL-NEXT:    vpmovb2m %zmm1, %k1
-; AVX512VL-NEXT:    vpcompressb %zmm0, %zmm2 {%k1}
-; AVX512VL-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512VL-NEXT:    retq
-    %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru)
-    ret <64 x i8> %out
-}
-
-define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) nounwind {
-; AVX512F-LABEL: test_compress_v32i16:
+; AVX512VL-LABEL: test_compress_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovb2m %xmm1, %k1
+; AVX512VL-NEXT:    vpcompressw %ymm0, %ymm2 {%k1}
+; AVX512VL-NEXT:    vmovdqa %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
+    %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru)
+    ret <16 x i16> %out
+}
+
+define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r13
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $128, %rsp
+; AVX2-NEXT:    # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT:    movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    movl %ecx, %r13d
+; AVX2-NEXT:    movl %edx, %r15d
+; AVX2-NEXT:    movl %esi, %ebx
+; AVX2-NEXT:    # kill: def $edi killed $edi def $rdi
+; AVX2-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    movl 360(%rbp), %eax
+; AVX2-NEXT:    movl 352(%rbp), %ecx
+; AVX2-NEXT:    vmovd %ecx, %xmm4
+; AVX2-NEXT:    vpinsrb $1, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 368(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $2, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 376(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $3, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 384(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $4, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 392(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $5, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 400(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $6, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 408(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $7, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 416(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $8, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 424(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $9, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 432(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $10, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 440(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $11, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 448(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $12, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 456(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $13, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 464(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $14, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 472(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $15, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    movl 224(%rbp), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm5
+; AVX2-NEXT:    movl 232(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $1, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 240(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $2, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 248(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $3, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 256(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $4, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 264(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $5, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 272(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $6, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 280(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $7, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 288(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $8, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 296(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $9, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 304(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $10, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 312(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $11, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 320(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $12, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 328(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $13, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 336(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $14, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 344(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $15, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX2-NEXT:    movl 96(%rbp), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm5
+; AVX2-NEXT:    movl 104(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $1, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 112(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $2, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 120(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $3, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 128(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $4, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 136(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $5, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 144(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $6, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 152(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $7, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 160(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $8, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 168(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $9, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 176(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $10, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 184(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $11, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 192(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $12, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 200(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $13, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 208(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $14, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    movl 216(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $15, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vmovd %edi, %xmm6
+; AVX2-NEXT:    vpinsrb $1, %esi, %xmm6, %xmm6
+; AVX2-NEXT:    vpinsrb $2, %edx, %xmm6, %xmm6
+; AVX2-NEXT:    vpinsrb $3, %r13d, %xmm6, %xmm6
+; AVX2-NEXT:    vpinsrb $4, %r8d, %xmm6, %xmm6
+; AVX2-NEXT:    vpinsrb $5, %r9d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 16(%rbp), %esi
+; AVX2-NEXT:    vpinsrb $6, %esi, %xmm6, %xmm6
+; AVX2-NEXT:    movl 24(%rbp), %edi
+; AVX2-NEXT:    vpinsrb $7, %edi, %xmm6, %xmm6
+; AVX2-NEXT:    movl 32(%rbp), %r8d
+; AVX2-NEXT:    vpinsrb $8, %r8d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 40(%rbp), %r9d
+; AVX2-NEXT:    vpinsrb $9, %r9d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 48(%rbp), %r10d
+; AVX2-NEXT:    vpinsrb $10, %r10d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 56(%rbp), %r11d
+; AVX2-NEXT:    vpinsrb $11, %r11d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 64(%rbp), %r14d
+; AVX2-NEXT:    vpinsrb $12, %r14d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 72(%rbp), %r12d
+; AVX2-NEXT:    vpinsrb $13, %r12d, %xmm6, %xmm6
+; AVX2-NEXT:    movl 80(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $14, %eax, %xmm6, %xmm6
+; AVX2-NEXT:    movl 88(%rbp), %eax
+; AVX2-NEXT:    vpinsrb $15, %eax, %xmm6, %xmm6
+; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX2-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX2-NEXT:    vpaddb %xmm5, %xmm4, %xmm4
+; AVX2-NEXT:    vpextrb $1, %xmm4, %eax
+; AVX2-NEXT:    vmovd %xmm4, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $2, %xmm4, %edx
+; AVX2-NEXT:    vpextrb $3, %xmm4, %eax
+; AVX2-NEXT:    addb %dl, %al
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    vpextrb $4, %xmm4, %ecx
+; AVX2-NEXT:    vpextrb $5, %xmm4, %edx
+; AVX2-NEXT:    addb %cl, %dl
+; AVX2-NEXT:    vpextrb $6, %xmm4, %ecx
+; AVX2-NEXT:    addb %dl, %cl
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $7, %xmm4, %eax
+; AVX2-NEXT:    vpextrb $8, %xmm4, %edx
+; AVX2-NEXT:    addb %al, %dl
+; AVX2-NEXT:    vpextrb $9, %xmm4, %eax
+; AVX2-NEXT:    addb %dl, %al
+; AVX2-NEXT:    vpextrb $10, %xmm4, %edx
+; AVX2-NEXT:    addb %al, %dl
+; AVX2-NEXT:    addb %cl, %dl
+; AVX2-NEXT:    vpextrb $11, %xmm4, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm4, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $13, %xmm4, %eax
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    vpextrb $14, %xmm4, %ecx
+; AVX2-NEXT:    addb %al, %cl
+; AVX2-NEXT:    vpextrb $15, %xmm4, %eax
+; AVX2-NEXT:    addb %cl, %al
+; AVX2-NEXT:    addb %dl, %al
+; AVX2-NEXT:    vmovaps %ymm3, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
+; AVX2-NEXT:    movzbl %al, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    movzbl (%rsp,%rax), %eax
+; AVX2-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    vpextrb $0, %xmm0, (%rsp)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vpextrb $1, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    andl $1, %ebx
+; AVX2-NEXT:    addq %rax, %rbx
+; AVX2-NEXT:    vpextrb $2, %xmm0, (%rsp,%rbx)
+; AVX2-NEXT:    andl $1, %r15d
+; AVX2-NEXT:    addq %rbx, %r15
+; AVX2-NEXT:    vpextrb $3, %xmm0, (%rsp,%r15)
+; AVX2-NEXT:    andl $1, %r13d
+; AVX2-NEXT:    addq %r15, %r13
+; AVX2-NEXT:    vpextrb $4, %xmm0, (%rsp,%r13)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %r13, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    addq %rax, %rsi
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $6, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    addq %rsi, %rdi
+; AVX2-NEXT:    # kill: def $esi killed $esi killed $rsi def $rsi
+; AVX2-NEXT:    andl $63, %esi
+; AVX2-NEXT:    vpextrb $7, %xmm0, (%rsp,%rsi)
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rdi, %r8
+; AVX2-NEXT:    # kill: def $edi killed $edi killed $rdi def $rdi
+; AVX2-NEXT:    andl $63, %edi
+; AVX2-NEXT:    vpextrb $8, %xmm0, (%rsp,%rdi)
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addq %r8, %r9
+; AVX2-NEXT:    # kill: def $r8d killed $r8d killed $r8 def $r8
+; AVX2-NEXT:    andl $63, %r8d
+; AVX2-NEXT:    vpextrb $9, %xmm0, (%rsp,%r8)
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addq %r9, %r10
+; AVX2-NEXT:    # kill: def $r9d killed $r9d killed $r9 def $r9
+; AVX2-NEXT:    andl $63, %r9d
+; AVX2-NEXT:    vpextrb $10, %xmm0, (%rsp,%r9)
+; AVX2-NEXT:    andl $1, %r11d
+; AVX2-NEXT:    addq %r10, %r11
+; AVX2-NEXT:    # kill: def $r10d killed $r10d killed $r10 def $r10
+; AVX2-NEXT:    andl $63, %r10d
+; AVX2-NEXT:    vpextrb $11, %xmm0, (%rsp,%r10)
+; AVX2-NEXT:    andl $1, %r14d
+; AVX2-NEXT:    addq %r11, %r14
+; AVX2-NEXT:    # kill: def $r11d killed $r11d killed $r11 def $r11
+; AVX2-NEXT:    andl $63, %r11d
+; AVX2-NEXT:    vpextrb $12, %xmm0, (%rsp,%r11)
+; AVX2-NEXT:    andl $1, %r12d
+; AVX2-NEXT:    addq %r14, %r12
+; AVX2-NEXT:    # kill: def $r14d killed $r14d killed $r14 def $r14
+; AVX2-NEXT:    andl $63, %r14d
+; AVX2-NEXT:    vpextrb $13, %xmm0, (%rsp,%r14)
+; AVX2-NEXT:    movl 80(%rbp), %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %r12, %rax
+; AVX2-NEXT:    # kill: def $r12d killed $r12d killed $r12 def $r12
+; AVX2-NEXT:    andl $63, %r12d
+; AVX2-NEXT:    vpextrb $14, %xmm0, (%rsp,%r12)
+; AVX2-NEXT:    movl 88(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $15, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 96(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vpextrb $0, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 104(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $1, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 112(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $2, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 120(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $3, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 128(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $4, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 136(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 144(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $6, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 152(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $7, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 160(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $8, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 168(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $9, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 176(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $10, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 184(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $11, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 192(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 200(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $13, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 208(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $14, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 216(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $15, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 224(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $0, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 232(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $1, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 240(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $2, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 248(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $3, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 256(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $4, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 264(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 272(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $6, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 280(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $7, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 288(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $8, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 296(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $9, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 304(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $10, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 312(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $11, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 320(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 328(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $13, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 336(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $14, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 344(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $15, %xmm1, (%rsp,%rax)
+; AVX2-NEXT:    movl 352(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vpextrb $0, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 360(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $1, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 368(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $2, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 376(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $3, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 384(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $4, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 392(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 400(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $6, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 408(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $7, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 416(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $8, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 424(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $9, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 432(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $10, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 440(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $11, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 448(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $12, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 456(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $13, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 464(%rbp), %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rcx, %rdx
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $14, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    movl 472(%rbp), %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    movl %edx, %eax
+; AVX2-NEXT:    andl $63, %eax
+; AVX2-NEXT:    vpextrb $15, %xmm0, (%rsp,%rax)
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    cmpq $64, %rcx
+; AVX2-NEXT:    cmovbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
+; AVX2-NEXT:    cmpq $63, %rcx
+; AVX2-NEXT:    movq %rcx, %rdx
+; AVX2-NEXT:    movl $63, %ecx
+; AVX2-NEXT:    cmovbq %rdx, %rcx
+; AVX2-NEXT:    movb %al, (%rsp,%rcx)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    leaq -40(%rbp), %rsp
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r13
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_compress_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    pushq %rbp
+; AVX512F-NEXT:    movq %rsp, %rbp
+; AVX512F-NEXT:    andq $-64, %rsp
+; AVX512F-NEXT:    subq $256, %rsp # imm = 0x100
+; AVX512F-NEXT:    movzbl 352(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    movzbl 360(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-5, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k1, %k3
+; AVX512F-NEXT:    movzbl 368(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $13, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-9, %ax
+; AVX512F-NEXT:    kmovw %eax, %k7
+; AVX512F-NEXT:    kandw %k7, %k0, %k0
+; AVX512F-NEXT:    kmovw %k7, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 376(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $12, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-17, %ax
+; AVX512F-NEXT:    kmovw %eax, %k5
+; AVX512F-NEXT:    kandw %k5, %k0, %k0
+; AVX512F-NEXT:    movzbl 384(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $11, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-33, %ax
+; AVX512F-NEXT:    kmovw %eax, %k6
+; AVX512F-NEXT:    kandw %k6, %k0, %k0
+; AVX512F-NEXT:    movzbl 392(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $10, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-65, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 400(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-129, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 408(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $8, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-257, %ax # imm = 0xFEFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 416(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $7, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-513, %ax # imm = 0xFDFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 424(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-1025, %ax # imm = 0xFBFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 432(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $5, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-2049, %ax # imm = 0xF7FF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 440(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $4, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-4097, %ax # imm = 0xEFFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 448(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $3, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-8193, %ax # imm = 0xDFFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 456(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $2, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k1
+; AVX512F-NEXT:    movw $-16385, %ax # imm = 0xBFFF
+; AVX512F-NEXT:    kmovw %eax, %k4
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    kmovw %k4, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 464(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 472(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 224(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    movzbl 232(%rbp), %r10d
+; AVX512F-NEXT:    kmovw %r10d, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw %k3, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 240(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 248(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    movzbl 256(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    movzbl 264(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 272(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 280(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $8, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k0, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 288(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k0, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 296(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $6, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 304(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $5, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 312(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 320(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $3, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 328(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    movzbl 336(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 344(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 96(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    movzbl 104(%rbp), %r10d
+; AVX512F-NEXT:    kmovw %r10d, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 112(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    movzbl 120(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k5, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    movzbl 128(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    movzbl 136(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 144(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 152(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $8, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 160(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 168(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $6, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 176(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $5, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 184(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $4, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 192(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $3, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 200(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 208(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 216(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    andl $1, %edi
+; AVX512F-NEXT:    kmovw %esi, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %edi, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %edx, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    kmovw %ecx, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    kmovw %r8d, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    kmovw %r9d, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 16(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k2, %k1
+; AVX512F-NEXT:    movzbl 24(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $8, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 32(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 40(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $6, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 48(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k5
+; AVX512F-NEXT:    kshiftrw $5, %k5, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 56(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k4
+; AVX512F-NEXT:    kshiftrw $4, %k4, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 64(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k3
+; AVX512F-NEXT:    kshiftrw $3, %k3, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 72(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k0
+; AVX512F-NEXT:    korw %k0, %k1, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 80(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $14, %k1, %k7
+; AVX512F-NEXT:    korw %k7, %k0, %k0
+; AVX512F-NEXT:    kshiftlw $1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k7
+; AVX512F-NEXT:    movzbl 88(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    kshiftlw $15, %k0, %k6
+; AVX512F-NEXT:    korw %k6, %k7, %k6
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movw $-3, %ax
+; AVX512F-NEXT:    kmovw %eax, %k6
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k6, %k7, %k6
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $14, %k7, %k7
+; AVX512F-NEXT:    korw %k7, %k6, %k6
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k6, %k6
+; AVX512F-NEXT:    kshiftrw $13, %k5, %k5
+; AVX512F-NEXT:    korw %k5, %k6, %k5
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k6 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k6, %k5, %k5
+; AVX512F-NEXT:    kshiftrw $12, %k4, %k4
+; AVX512F-NEXT:    korw %k4, %k5, %k4
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k5 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k5, %k4, %k4
+; AVX512F-NEXT:    kshiftrw $11, %k3, %k3
+; AVX512F-NEXT:    korw %k3, %k4, %k3
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k4, %k3, %k3
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k3, %k2
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k3, %k2, %k2
+; AVX512F-NEXT:    kshiftlw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $7, %k0, %k0
+; AVX512F-NEXT:    korw %k0, %k1, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kxorw %k0, %k1, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k7, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $13, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k6, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $12, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k5, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $11, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k4, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $10, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k3, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftlw $9, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $9, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kxorw %k0, %k1, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %ecx
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
+; AVX512F-NEXT:    kxorw %k2, %k3, %k0
+; AVX512F-NEXT:    kshiftrw $8, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %edx
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm3, %zmm3 {%k1} {z}
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm4 {%k1} {z} = -1
+; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm2, %zmm2 {%k1} {z}
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm5 {%k1} {z} = -1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpcompressd %zmm6, %zmm6 {%k3} {z}
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm7 {%k3} {z} = -1
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpcompressd %zmm0, %zmm0 {%k2} {z}
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm8 {%k2} {z} = -1
+; AVX512F-NEXT:    vpmovdb %zmm6, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    andl $31, %eax
+; AVX512F-NEXT:    vpmovdb %zmm0, 64(%rsp,%rax)
+; AVX512F-NEXT:    vpmovdb %zmm3, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    andl $31, %ecx
+; AVX512F-NEXT:    vpmovdb %zmm2, 96(%rsp,%rcx)
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm0
+; AVX512F-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    andl $63, %edx
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm0
+; AVX512F-NEXT:    vmovaps %ymm0, 128(%rsp,%rdx)
+; AVX512F-NEXT:    vpmovdb %zmm4, %xmm0
+; AVX512F-NEXT:    vpmovdb %zmm5, %xmm2
+; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm0, {{[0-9]+}}(%rsp), %ymm2, %ymm0
+; AVX512F-NEXT:    vpmovdb %zmm7, %xmm2
+; AVX512F-NEXT:    vpmovdb %zmm8, %xmm3
+; AVX512F-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, {{[0-9]+}}(%rsp), %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    movq %rbp, %rsp
+; AVX512F-NEXT:    popq %rbp
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_compress_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $7, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VL-NEXT:    vpcompressb %zmm0, %zmm2 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512VL-NEXT:    retq
+    %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru)
+    ret <64 x i8> %out
+}
+
+define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) nounwind {
+; AVX2-LABEL: test_compress_v32i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r13
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $256, %rsp # imm = 0x100
+; AVX2-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovaps %ymm3, (%rsp)
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT:    vpand %ymm5, %ymm6, %ymm5
+; AVX2-NEXT:    vpaddw %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX2-NEXT:    vpaddw %xmm5, %xmm4, %xmm4
+; AVX2-NEXT:    vpextrw $1, %xmm4, %eax
+; AVX2-NEXT:    vmovd %xmm4, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrw $2, %xmm4, %eax
+; AVX2-NEXT:    vpextrw $3, %xmm4, %edx
+; AVX2-NEXT:    addl %eax, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    vpextrw $4, %xmm4, %eax
+; AVX2-NEXT:    vpextrw $5, %xmm4, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    vpextrw $6, %xmm4, %eax
+; AVX2-NEXT:    addl %ecx, %eax
+; AVX2-NEXT:    addl %edx, %eax
+; AVX2-NEXT:    vpextrw $7, %xmm4, %ecx
+; AVX2-NEXT:    addl %eax, %ecx
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vmovd %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $2, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $3, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $4, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $5, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $6, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $7, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $8, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $9, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $10, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $11, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $12, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $14, %xmm2, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $15, %xmm2, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vmovd %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $1, %xmm3, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $2, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vpextrb $3, %xmm3, %r12d
+; AVX2-NEXT:    andl $1, %r12d
+; AVX2-NEXT:    addq %rcx, %r12
+; AVX2-NEXT:    vpextrb $4, %xmm3, %r15d
+; AVX2-NEXT:    andl $1, %r15d
+; AVX2-NEXT:    addq %r12, %r15
+; AVX2-NEXT:    vpextrb $5, %xmm3, %r14d
+; AVX2-NEXT:    andl $1, %r14d
+; AVX2-NEXT:    addq %r15, %r14
+; AVX2-NEXT:    vpextrb $6, %xmm3, %ebx
+; AVX2-NEXT:    andl $1, %ebx
+; AVX2-NEXT:    addq %r14, %rbx
+; AVX2-NEXT:    vpextrb $7, %xmm3, %r11d
+; AVX2-NEXT:    andl $1, %r11d
+; AVX2-NEXT:    addq %rbx, %r11
+; AVX2-NEXT:    vpextrb $8, %xmm3, %r10d
+; AVX2-NEXT:    andl $1, %r10d
+; AVX2-NEXT:    addq %r11, %r10
+; AVX2-NEXT:    vpextrb $9, %xmm3, %r9d
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addq %r10, %r9
+; AVX2-NEXT:    vpextrb $10, %xmm3, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %r9, %r8
+; AVX2-NEXT:    vpextrb $11, %xmm3, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    addq %r8, %rdi
+; AVX2-NEXT:    vpextrb $12, %xmm3, %esi
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    addq %rdi, %rsi
+; AVX2-NEXT:    vpextrb $13, %xmm3, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rsi, %rdx
+; AVX2-NEXT:    vpextrb $14, %xmm3, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    vpextrb $15, %xmm3, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    cmpq $32, %rax
+; AVX2-NEXT:    vpextrw $7, %xmm2, %eax
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    cmovbw (%rsp,%r13,2), %ax
+; AVX2-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rsp)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    vpextrw $1, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    vpextrw $2, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    vpextrw $3, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    vpextrw $4, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    vpextrw $5, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $6, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $7, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $1, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $2, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $3, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $4, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $5, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $6, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $7, %xmm0, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $0, %xmm1, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $1, %xmm1, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT:    andl $31, %r13d
+; AVX2-NEXT:    vpextrw $2, %xmm1, (%rsp,%r13,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT:    andl $31, %eax
+; AVX2-NEXT:    vpextrw $3, %xmm1, (%rsp,%rax,2)
+; AVX2-NEXT:    andl $31, %r12d
+; AVX2-NEXT:    vpextrw $4, %xmm1, (%rsp,%r12,2)
+; AVX2-NEXT:    andl $31, %r15d
+; AVX2-NEXT:    vpextrw $5, %xmm1, (%rsp,%r15,2)
+; AVX2-NEXT:    andl $31, %r14d
+; AVX2-NEXT:    vpextrw $6, %xmm1, (%rsp,%r14,2)
+; AVX2-NEXT:    andl $31, %ebx
+; AVX2-NEXT:    vpextrw $7, %xmm1, (%rsp,%rbx,2)
+; AVX2-NEXT:    andl $31, %r11d
+; AVX2-NEXT:    vpextrw $0, %xmm2, (%rsp,%r11,2)
+; AVX2-NEXT:    andl $31, %r10d
+; AVX2-NEXT:    vpextrw $1, %xmm2, (%rsp,%r10,2)
+; AVX2-NEXT:    andl $31, %r9d
+; AVX2-NEXT:    vpextrw $2, %xmm2, (%rsp,%r9,2)
+; AVX2-NEXT:    andl $31, %r8d
+; AVX2-NEXT:    vpextrw $3, %xmm2, (%rsp,%r8,2)
+; AVX2-NEXT:    andl $31, %edi
+; AVX2-NEXT:    vpextrw $4, %xmm2, (%rsp,%rdi,2)
+; AVX2-NEXT:    andl $31, %esi
+; AVX2-NEXT:    vpextrw $5, %xmm2, (%rsp,%rsi,2)
+; AVX2-NEXT:    andl $31, %edx
+; AVX2-NEXT:    vpextrw $6, %xmm2, (%rsp,%rdx,2)
+; AVX2-NEXT:    andl $31, %ecx
+; AVX2-NEXT:    vpextrw $7, %xmm2, (%rsp,%rcx,2)
+; AVX2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT:    cmpq $31, %rcx
+; AVX2-NEXT:    movl $31, %eax
+; AVX2-NEXT:    cmovbq %rcx, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload
+; AVX2-NEXT:    movw %cx, (%rsp,%rax,2)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    leaq -40(%rbp), %rsp
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r13
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_compress_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    pushq %rbp
+; AVX512F-NEXT:    movq %rsp, %rbp
+; AVX512F-NEXT:    andq $-64, %rsp
+; AVX512F-NEXT:    subq $128, %rsp
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512F-NEXT:    vpmovsxbd %xmm5, %zmm5
+; AVX512F-NEXT:    vpslld $31, %zmm5, %zmm5
+; AVX512F-NEXT:    vptestmd %zmm5, %zmm5, %k1
+; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT:    vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpcompressd %zmm1, %zmm1 {%k2} {z}
+; AVX512F-NEXT:    vpmovdw %zmm1, (%rsp)
+; AVX512F-NEXT:    kshiftrw $8, %k2, %k0
+; AVX512F-NEXT:    kxorw %k0, %k2, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k2
+; AVX512F-NEXT:    kxorw %k2, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k2
+; AVX512F-NEXT:    kxorw %k2, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k2
+; AVX512F-NEXT:    kxorw %k2, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    andl $31, %eax
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpcompressd %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdw %zmm0, (%rsp,%rax,2)
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
+; AVX512F-NEXT:    vpsllw $15, %ymm4, %ymm1
+; AVX512F-NEXT:    vpsraw $15, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, {{[0-9]+}}(%rsp), %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $15, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsraw $15, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, (%rsp), %ymm2, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    movq %rbp, %rsp
+; AVX512F-NEXT:    popq %rbp
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_compress_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmovb2m %ymm1, %k1
+; AVX512VL-NEXT:    vpcompressw %zmm0, %zmm2 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512VL-NEXT:    retq
+    %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru)
+    ret <32 x i16> %out
+}
+
+define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) nounwind {
+; AVX2-LABEL: test_compress_large:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $288, %rsp # imm = 0x120
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT:    # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT:    # kill: def $esi killed $esi def $rsi
+; AVX2-NEXT:    movq %rdi, %rax
+; AVX2-NEXT:    vmovss %xmm0, (%rsp)
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rsi,4)
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %esi, %edx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addl %ecx, %r8d
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%r8,4)
+; AVX2-NEXT:    andl $1, %r9d
+; AVX2-NEXT:    addl %r8d, %r9d
+; AVX2-NEXT:    movzbl 16(%rbp), %ecx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%r9,4)
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %r9d, %ecx
+; AVX2-NEXT:    movzbl 24(%rbp), %edx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    movzbl 32(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 40(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm1, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 48(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm1, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 56(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm1, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 64(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm1, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 72(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 80(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 88(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 96(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 104(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm2, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 112(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm2, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 120(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm2, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 128(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm2, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 136(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm2, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 144(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 152(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 160(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 168(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm3, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 176(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm3, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 184(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm3, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 192(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm3, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 200(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm3, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 208(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 216(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 224(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 232(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm4, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 240(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm4, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 248(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm4, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 256(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm4, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 264(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm4, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 272(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 280(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 288(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 296(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm5, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 304(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm5, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 312(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm5, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 320(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm5, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 328(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm5, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 336(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 344(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 352(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 360(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm6, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 368(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm6, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 376(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm6, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 384(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm6, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 392(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm6, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 400(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 408(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 416(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 424(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vmovss %xmm7, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 432(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm7, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 440(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm7, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 448(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm7, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 456(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractf128 $1, %ymm7, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    movzbl 464(%rbp), %ecx
+; AVX2-NEXT:    movzbl %cl, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addl %edx, %ecx
+; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    movzbl 472(%rbp), %edx
+; AVX2-NEXT:    movzbl %dl, %edx
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addl %ecx, %edx
+; AVX2-NEXT:    # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT:    andl $63, %ecx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
+; AVX2-NEXT:    andl $63, %edx
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    vmovaps (%rsp), %ymm0
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm1
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm2
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm3
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm4
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm5
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm6
+; AVX2-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm7
+; AVX2-NEXT:    vmovaps %ymm7, 224(%rdi)
+; AVX2-NEXT:    vmovaps %ymm6, 192(%rdi)
+; AVX2-NEXT:    vmovaps %ymm5, 160(%rdi)
+; AVX2-NEXT:    vmovaps %ymm4, 128(%rdi)
+; AVX2-NEXT:    vmovaps %ymm3, 96(%rdi)
+; AVX2-NEXT:    vmovaps %ymm2, 64(%rdi)
+; AVX2-NEXT:    vmovaps %ymm1, 32(%rdi)
+; AVX2-NEXT:    vmovaps %ymm0, (%rdi)
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_compress_large:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-64, %rsp
-; AVX512F-NEXT:    subq $128, %rsp
-; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
-; AVX512F-NEXT:    vpmovsxbd %xmm5, %zmm5
-; AVX512F-NEXT:    vpslld $31, %zmm5, %zmm5
-; AVX512F-NEXT:    vptestmd %zmm5, %zmm5, %k1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vpslld $31, %zmm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k2
-; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT:    vpcompressd %zmm1, %zmm1 {%k2} {z}
-; AVX512F-NEXT:    vpmovdw %zmm1, (%rsp)
-; AVX512F-NEXT:    kshiftrw $8, %k2, %k0
-; AVX512F-NEXT:    kxorw %k0, %k2, %k0
+; AVX512F-NEXT:    subq $640, %rsp # imm = 0x280
+; AVX512F-NEXT:    movzbl 352(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    movzbl 360(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-5, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k1, %k3
+; AVX512F-NEXT:    movzbl 368(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $13, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-9, %ax
+; AVX512F-NEXT:    kmovw %eax, %k7
+; AVX512F-NEXT:    kandw %k7, %k0, %k0
+; AVX512F-NEXT:    kmovw %k7, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 376(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $12, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-17, %ax
+; AVX512F-NEXT:    kmovw %eax, %k5
+; AVX512F-NEXT:    kandw %k5, %k0, %k0
+; AVX512F-NEXT:    movzbl 384(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $11, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-33, %ax
+; AVX512F-NEXT:    kmovw %eax, %k6
+; AVX512F-NEXT:    kandw %k6, %k0, %k0
+; AVX512F-NEXT:    movzbl 392(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $10, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-65, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 400(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-129, %ax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 408(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $8, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-257, %ax # imm = 0xFEFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 416(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $7, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-513, %ax # imm = 0xFDFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 424(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-1025, %ax # imm = 0xFBFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 432(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $5, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-2049, %ax # imm = 0xF7FF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 440(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $4, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-4097, %ax # imm = 0xEFFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 448(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $3, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    movw $-8193, %ax # imm = 0xDFFF
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 456(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $2, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k1
+; AVX512F-NEXT:    movw $-16385, %ax # imm = 0xBFFF
+; AVX512F-NEXT:    kmovw %eax, %k4
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    kmovw %k4, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 464(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 472(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 224(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    movzbl 232(%rbp), %r10d
+; AVX512F-NEXT:    kmovw %r10d, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw %k3, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 240(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 248(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    movzbl 256(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    movzbl 264(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 272(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 280(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $8, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k0, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 288(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k0, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 296(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $6, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 304(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $5, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 312(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
 ; AVX512F-NEXT:    kshiftrw $4, %k0, %k2
-; AVX512F-NEXT:    kxorw %k2, %k0, %k0
-; AVX512F-NEXT:    kshiftrw $2, %k0, %k2
-; AVX512F-NEXT:    kxorw %k2, %k0, %k0
-; AVX512F-NEXT:    kshiftrw $1, %k0, %k2
-; AVX512F-NEXT:    kxorw %k2, %k0, %k0
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 320(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k0
+; AVX512F-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $3, %k0, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 328(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    movzbl 336(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 344(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movzbl 96(%rbp), %eax
+; AVX512F-NEXT:    andl $1, %eax
+; AVX512F-NEXT:    movzbl 104(%rbp), %r10d
+; AVX512F-NEXT:    kmovw %r10d, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 112(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    movzbl 120(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k5, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    movzbl 128(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    movzbl 136(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 144(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 152(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $8, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 160(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 168(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $6, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 176(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $5, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 184(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $4, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 192(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $3, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 200(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 208(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $14, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $1, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $1, %k1, %k1
+; AVX512F-NEXT:    movzbl 216(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    andl $1, %edi
+; AVX512F-NEXT:    kmovw %esi, %k1
+; AVX512F-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    kmovw %edi, %k2
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw %edx, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $13, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k4, %k1, %k1
+; AVX512F-NEXT:    kmovw %ecx, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $12, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k5, %k1, %k1
+; AVX512F-NEXT:    kmovw %r8d, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $11, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k6, %k1, %k1
+; AVX512F-NEXT:    kmovw %r9d, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 16(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $9, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k2, %k1
+; AVX512F-NEXT:    movzbl 24(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $8, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k3, %k1, %k1
+; AVX512F-NEXT:    movzbl 32(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k7, %k1, %k1
+; AVX512F-NEXT:    movzbl 40(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kshiftrw $6, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 48(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k5
+; AVX512F-NEXT:    kshiftrw $5, %k5, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 56(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k4
+; AVX512F-NEXT:    kshiftrw $4, %k4, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k2, %k1, %k1
+; AVX512F-NEXT:    movzbl 64(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k3
+; AVX512F-NEXT:    kshiftrw $3, %k3, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kandw %k0, %k1, %k1
+; AVX512F-NEXT:    movzbl 72(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    kshiftlw $15, %k2, %k2
+; AVX512F-NEXT:    kshiftrw $2, %k2, %k0
+; AVX512F-NEXT:    korw %k0, %k1, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    movzbl 80(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kshiftlw $14, %k1, %k7
+; AVX512F-NEXT:    korw %k7, %k0, %k0
+; AVX512F-NEXT:    kshiftlw $1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k7
+; AVX512F-NEXT:    movzbl 88(%rbp), %eax
+; AVX512F-NEXT:    kmovw %eax, %k0
+; AVX512F-NEXT:    kshiftlw $15, %k0, %k6
+; AVX512F-NEXT:    korw %k6, %k7, %k6
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    movw $-3, %ax
+; AVX512F-NEXT:    kmovw %eax, %k6
+; AVX512F-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k6, %k7, %k6
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $14, %k7, %k7
+; AVX512F-NEXT:    korw %k7, %k6, %k6
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k7, %k6, %k6
+; AVX512F-NEXT:    kshiftrw $13, %k5, %k5
+; AVX512F-NEXT:    korw %k5, %k6, %k5
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k6 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k6, %k5, %k5
+; AVX512F-NEXT:    kshiftrw $12, %k4, %k4
+; AVX512F-NEXT:    korw %k4, %k5, %k4
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k5 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k5, %k4, %k4
+; AVX512F-NEXT:    kshiftrw $11, %k3, %k3
+; AVX512F-NEXT:    korw %k3, %k4, %k3
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k4, %k3, %k3
+; AVX512F-NEXT:    kshiftrw $10, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k3, %k2
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k3, %k2, %k2
+; AVX512F-NEXT:    kshiftlw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k2, %k1
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftlw $7, %k0, %k0
+; AVX512F-NEXT:    korw %k0, %k1, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kxorw %k0, %k1, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm2, %zmm2 {%k1} {z}
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kandw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $14, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k7, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $13, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k6, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $12, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k5, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $11, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k4, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftrw $10, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kandw %k3, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $6, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftlw $9, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $9, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k1, %k1
+; AVX512F-NEXT:    korw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512F-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    kshiftlw $7, %k2, %k2
+; AVX512F-NEXT:    korw %k2, %k1, %k1
+; AVX512F-NEXT:    kxorw %k0, %k1, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %ecx
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm3, %zmm3 {%k1} {z}
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm0, %zmm0 {%k2} {z}
+; AVX512F-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; AVX512F-NEXT:    vpcompressd %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT:    kxorw %k1, %k2, %k0
+; AVX512F-NEXT:    kshiftrw $8, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $4, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $2, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512F-NEXT:    kxorw %k1, %k0, %k0
+; AVX512F-NEXT:    kmovw %k0, %edx
+; AVX512F-NEXT:    vmovdqa64 %zmm0, {{[0-9]+}}(%rsp)
 ; AVX512F-NEXT:    andl $31, %eax
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT:    vpcompressd %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    vpmovdw %zmm0, (%rsp,%rax,2)
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-NEXT:    vpsllw $15, %ymm4, %ymm1
-; AVX512F-NEXT:    vpsraw $15, %ymm1, %ymm1
-; AVX512F-NEXT:    vpblendvb %ymm1, {{[0-9]+}}(%rsp), %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $15, %ymm3, %ymm1
-; AVX512F-NEXT:    vpsraw $15, %ymm1, %ymm1
-; AVX512F-NEXT:    vpblendvb %ymm1, (%rsp), %ymm2, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rsp,%rax,4)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    andl $31, %ecx
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 192(%rsp,%rcx,4)
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm0
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm1
+; AVX512F-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    andl $63, %edx
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm0
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm2
+; AVX512F-NEXT:    vmovaps %zmm0, 320(%rsp,%rdx,4)
+; AVX512F-NEXT:    vmovaps %zmm1, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT:    vmovaps %zmm2, 384(%rsp,%rdx,4)
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm0
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm1
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm2
+; AVX512F-NEXT:    vmovaps {{[0-9]+}}(%rsp), %zmm3
 ; AVX512F-NEXT:    movq %rbp, %rsp
 ; AVX512F-NEXT:    popq %rbp
 ; AVX512F-NEXT:    retq
 ;
-; AVX512VL-LABEL: test_compress_v32i16:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsllw $7, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpmovb2m %ymm1, %k1
-; AVX512VL-NEXT:    vpcompressw %zmm0, %zmm2 {%k1}
-; AVX512VL-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512VL-NEXT:    retq
-    %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru)
-    ret <32 x i16> %out
-}
-
-define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) nounwind {
 ; AVX512VL-LABEL: test_compress_large:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    pushq %rbp
@@ -970,6 +4478,97 @@ define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ig
 }
 
 define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind {
+; AVX2-LABEL: test_compress_small:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    vpextrb $1, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $1, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vpextrb $2, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $2, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vpextrb $3, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $3, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    vpextrb $4, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $4, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    vpextrb $5, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $5, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $6, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $6, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $7, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $7, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $8, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $8, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $9, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $9, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $10, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $10, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $11, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $11, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $12, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $12, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    vpextrb $13, %xmm1, %ecx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $13, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vpextrb $14, %xmm1, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addl %ecx, %eax
+; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpextrb $14, %xmm0, -24(%rsp,%rcx)
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpextrb $15, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512F-LABEL: test_compress_small:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpslld $31, %xmm1, %xmm1