[llvm] r357840 - [X86] Split expandload and compressstore tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Apr 6 07:14:54 PDT 2019


Author: rksimon
Date: Sat Apr  6 07:14:54 2019
New Revision: 357840

URL: http://llvm.org/viewvc/llvm-project?rev=357840&view=rev
Log:
[X86] Split expandload and compressstore tests
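
For reference, the two intrinsic families exercised by these tests take a
scalar base pointer and a per-lane i1 mask: expandload reads consecutive
elements from memory into the unmasked lanes (masked-off lanes keep the
passthru value), and compressstore writes the unmasked lanes out to
consecutive memory. A minimal v4f32 sketch, using the same declarations and
mask shapes that appear in the tests below:

    declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
    declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>)

    ; Load three consecutive floats from %base into lanes 0-2; lane 3 keeps %src0.
    %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)

    ; Store lanes 0-2 of %V to three consecutive floats at %base; lane 3 is skipped.
    call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)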

Added:
    llvm/trunk/test/CodeGen/X86/masked_compressstore.ll
      - copied, changed from r357833, llvm/trunk/test/CodeGen/X86/compress_expand.ll
    llvm/trunk/test/CodeGen/X86/masked_expandload.ll
      - copied, changed from r357839, llvm/trunk/test/CodeGen/X86/compress_expand.ll
Removed:
    llvm/trunk/test/CodeGen/X86/compress_expand.ll

Removed: llvm/trunk/test/CodeGen/X86/compress_expand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compress_expand.ll?rev=357839&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compress_expand.ll (removed)
@@ -1,416 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
-; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define <16 x float> @expandload_v16f32_const_undef(float* %base) {
-; SKX-LABEL: expandload_v16f32_const_undef:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v16f32_const_undef:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
-; KNL-NEXT:    retq
-  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
-  ret <16 x float>%res
-}
-
-define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
-; SKX-LABEL: expandload_v16f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $30719, %ax # imm = 0x77FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v16f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $30719, %ax # imm = 0x77FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    retq
-  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
-  ret <16 x float>%res
-}
-
-define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
-; SKX-LABEL: expandload_v8f64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v8f64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    retq
-  %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
-  ret <8 x double>%res
-}
-
-define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
-; SKX-LABEL: expandload_v4f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movb $7, %al
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v4f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    movw $7, %ax
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
-  ret <4 x float>%res
-}
-
-define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
-; SKX-LABEL: expandload_v2i64_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movb $2, %al
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vpexpandq (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v2i64_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    movb $2, %al
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
-  ret <2 x i64>%res
-}
-
-declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
-declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
-declare <4 x float>  @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
-declare <2 x i64>    @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
-
-define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
-; SKX-LABEL: compressstore_v16f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>)
-  ret void
-}
-
-define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8f32_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vcompressps %ymm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8f32_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v8f32(<8 x float> %V, float* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8f64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8f64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v8f64(<8 x double> %V, double* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8i64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8i64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v8i64(<8 x i64> %V, i64* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
-; SKX-LABEL: compressstore_v4i64_v4i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
-; SKX-NEXT:    vpmovd2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %ymm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v4i64_v4i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $12, %k0, %k0
-; KNL-NEXT:    kshiftrw $12, %k0, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v4i64(<4 x i64> %V, i64* %base, <4 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
-; SKX-LABEL: compressstore_v2i64_v2i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
-; SKX-NEXT:    vpmovq2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v2i64_v2i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpsllq $63, %xmm1, %xmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, i64* %base, <2 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) {
-; SKX-LABEL: compressstore_v4f32_v4i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
-; SKX-NEXT:    vpmovd2m %xmm1, %k1
-; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v4f32_v4i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $12, %k0, %k0
-; KNL-NEXT:    kshiftrw $12, %k0, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask)
-  ret void
-}
-
-define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
-; SKX-LABEL: expandload_v2f32_v2i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
-; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v2f32_v2i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
-  %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
-  ret <2 x float> %res
-}
-
-define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
-; SKX-LABEL: compressstore_v2f32_v2i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
-; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v2f32_v2i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.compressstore.v2f32(<2 x float> %V, float* %base, <2 x i1> %mask)
-  ret void
-}
-
-define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
-; ALL-LABEL: expandload_v32f32_v32i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; ALL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; ALL-NEXT:    kmovw %k2, %eax
-; ALL-NEXT:    popcntl %eax, %eax
-; ALL-NEXT:    vexpandps (%rdi,%rax,4), %zmm1 {%k1}
-; ALL-NEXT:    vexpandps (%rdi), %zmm0 {%k2}
-; ALL-NEXT:    retq
-  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
-  %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
-  ret <32 x float> %res
-}
-
-define <16 x double> @compressstore_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
-; SKX-LABEL: compressstore_v16f64_v16i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; SKX-NEXT:    vptestnmd %ymm3, %ymm3, %k1
-; SKX-NEXT:    vptestnmd %ymm2, %ymm2, %k2
-; SKX-NEXT:    kmovb %k2, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f64_v16i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
-; KNL-NEXT:    kmovw %k2, %eax
-; KNL-NEXT:    movzbl %al, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; KNL-NEXT:    retq
-  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
-  ret <16 x double> %res
-}
-
-define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
-; SKX-LABEL: compressstore_v32f32_v32i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; SKX-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; SKX-NEXT:    kmovw %k2, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
-; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v32f32_v32i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; KNL-NEXT:    kmovw %k2, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
-; KNL-NEXT:    retq
-  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) {
-; SKX-LABEL: compressstore_v16f64_v16i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2
-; SKX-NEXT:    vpmovb2m %xmm2, %k1
-; SKX-NEXT:    kshiftrw $8, %k1, %k2
-; SKX-NEXT:    kmovb %k1, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
-; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f64_v16i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; KNL-NEXT:    vpslld $31, %zmm2, %zmm2
-; KNL-NEXT:    vptestmd %zmm2, %zmm2, %k1
-; KNL-NEXT:    kshiftrw $8, %k1, %k2
-; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    kmovw %k1, %eax
-; KNL-NEXT:    movzbl %al, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v16f64(<16 x double> %V, double* %base, <16 x i1> %mask)
-  ret void
-}
-
-declare void @llvm.masked.compressstore.v16f32(<16 x float>, float* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8f32(<8 x float>, float* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8f64(<8 x double>, double* , <8 x i1>)
-declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64* , <8 x i1>)
-declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4f32(<4 x float>, float* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64* , <4 x i1>)
-declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64* , <2 x i1>)
-declare void @llvm.masked.compressstore.v2f32(<2 x float>, float* , <2 x i1>)
-declare void @llvm.masked.compressstore.v32f32(<32 x float>, float* , <32 x i1>)
-declare void @llvm.masked.compressstore.v16f64(<16 x double>, double* , <16 x i1>)
-declare void @llvm.masked.compressstore.v32f64(<32 x double>, double* , <32 x i1>)
-
-declare <2 x float> @llvm.masked.expandload.v2f32(float* , <2 x i1> , <2 x float> )
-declare <32 x float> @llvm.masked.expandload.v32f32(float* , <32 x i1> , <32 x float> )
-declare <16 x double> @llvm.masked.expandload.v16f64(double* , <16 x i1> , <16 x double> )

Copied: llvm/trunk/test/CodeGen/X86/masked_compressstore.ll (from r357833, llvm/trunk/test/CodeGen/X86/compress_expand.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_compressstore.ll?p2=llvm/trunk/test/CodeGen/X86/masked_compressstore.ll&p1=llvm/trunk/test/CodeGen/X86/compress_expand.ll&r1=357833&r2=357840&rev=357840&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_compressstore.ll Sat Apr  6 07:14:54 2019
@@ -1,109 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
-; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define <16 x float> @expandload_v16f32_const_undef(float* %base) {
-; SKX-LABEL: expandload_v16f32_const_undef:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v16f32_const_undef:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
-; KNL-NEXT:    retq
-  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
-  ret <16 x float>%res
-}
-
-define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
-; SKX-LABEL: expandload_v16f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $30719, %ax # imm = 0x77FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v16f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $30719, %ax # imm = 0x77FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    retq
-  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
-  ret <16 x float>%res
-}
-
-define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
-; SKX-LABEL: expandload_v8f64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v8f64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    retq
-  %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
-  ret <8 x double>%res
-}
-
-define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
-; SKX-LABEL: expandload_v4f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movb $7, %al
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v4f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    movw $7, %ax
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
-  ret <4 x float>%res
-}
-
-define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
-; SKX-LABEL: expandload_v2i64_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movb $2, %al
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vpexpandq (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v2i64_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    movb $2, %al
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
-  ret <2 x i64>%res
-}
-
-declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
-declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
-declare <4 x float>  @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
-declare <2 x i64>    @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL
 
 define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
 ; SKX-LABEL: compressstore_v16f32_const:
@@ -249,31 +146,6 @@ define void @compressstore_v4f32_v4i1(fl
   ret void
 }
 
-define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
-; SKX-LABEL: expandload_v2f32_v2i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
-; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: expandload_v2f32_v2i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT:    retq
-  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
-  %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
-  ret <2 x float> %res
-}
-
 define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
 ; SKX-LABEL: compressstore_v2f32_v2i32:
 ; SKX:       # %bb.0:
@@ -298,49 +170,6 @@ define void @compressstore_v2f32_v2i32(f
   ret void
 }
 
-define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
-; ALL-LABEL: expandload_v32f32_v32i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; ALL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; ALL-NEXT:    kmovw %k2, %eax
-; ALL-NEXT:    popcntl %eax, %eax
-; ALL-NEXT:    vexpandps (%rdi,%rax,4), %zmm1 {%k1}
-; ALL-NEXT:    vexpandps (%rdi), %zmm0 {%k2}
-; ALL-NEXT:    retq
-  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
-  %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
-  ret <32 x float> %res
-}
-
-define <16 x double> @compressstore_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
-; SKX-LABEL: compressstore_v16f64_v16i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; SKX-NEXT:    vptestnmd %ymm3, %ymm3, %k1
-; SKX-NEXT:    vptestnmd %ymm2, %ymm2, %k2
-; SKX-NEXT:    kmovb %k2, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f64_v16i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
-; KNL-NEXT:    kmovw %k2, %eax
-; KNL-NEXT:    movzbl %al, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; KNL-NEXT:    retq
-  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
-  ret <16 x double> %res
-}
-
 define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
 ; SKX-LABEL: compressstore_v32f32_v32i32:
 ; SKX:       # %bb.0:
@@ -396,21 +225,34 @@ define void @compressstore_v16f64_v16i1(
   ret void
 }
 
-declare void @llvm.masked.compressstore.v16f32(<16 x float>, float* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8f32(<8 x float>, float* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8f64(<8 x double>, double* , <8 x i1>)
-declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64* , <8 x i1>)
-declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4f32(<4 x float>, float* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64* , <4 x i1>)
-declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64* , <2 x i1>)
-declare void @llvm.masked.compressstore.v2f32(<2 x float>, float* , <2 x i1>)
-declare void @llvm.masked.compressstore.v32f32(<32 x float>, float* , <32 x i1>)
-declare void @llvm.masked.compressstore.v16f64(<16 x double>, double* , <16 x i1>)
-declare void @llvm.masked.compressstore.v32f64(<32 x double>, double* , <32 x i1>)
-
-declare <2 x float> @llvm.masked.expandload.v2f32(float* , <2 x i1> , <2 x float> )
-declare <32 x float> @llvm.masked.expandload.v32f32(float* , <32 x i1> , <32 x float> )
-declare <16 x double> @llvm.masked.expandload.v16f64(double* , <16 x i1> , <16 x double> )
+declare void @llvm.masked.compressstore.v16f64(<16 x double>, double*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8f64(<8 x double>, double*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4f64(<4 x double>, double*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2f64(<2 x double>, double*, <2 x i1>)
+declare void @llvm.masked.compressstore.v1f64(<1 x double>, double*, <1 x i1>)
+
+declare void @llvm.masked.compressstore.v32f32(<32 x float>, float*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2f32(<2 x float>, float*, <2 x i1>)
+
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64*, <2 x i1>)
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, i64*, <1 x i1>)
+
+declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, i32*, <2 x i1>)
+
+declare void @llvm.masked.compressstore.v32i16(<32 x i16>, i16*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16i16(<16 x i16>, i16*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, i16*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, i16*, <4 x i1>)
+
+declare void @llvm.masked.compressstore.v64i8(<64 x i8>, i8*, <64 x i1>)
+declare void @llvm.masked.compressstore.v32i8(<32 x i8>, i8*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16i8(<16 x i8>, i8*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, i8*, <8 x i1>)

Copied: llvm/trunk/test/CodeGen/X86/masked_expandload.ll (from r357839, llvm/trunk/test/CodeGen/X86/compress_expand.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_expandload.ll?p2=llvm/trunk/test/CodeGen/X86/masked_expandload.ll&p1=llvm/trunk/test/CodeGen/X86/compress_expand.ll&r1=357839&r2=357840&rev=357840&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_expandload.ll Sat Apr  6 07:14:54 2019
@@ -1,9 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
-; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL
 
 define <16 x float> @expandload_v16f32_const_undef(float* %base) {
 ; SKX-LABEL: expandload_v16f32_const_undef:
@@ -100,155 +97,6 @@ define <2 x i64> @expandload_v2i64_const
   ret <2 x i64>%res
 }
 
-declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
-declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
-declare <4 x float>  @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
-declare <2 x i64>    @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
-
-define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
-; SKX-LABEL: compressstore_v16f32_const:
-; SKX:       # %bb.0:
-; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f32_const:
-; KNL:       # %bb.0:
-; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>)
-  ret void
-}
-
-define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8f32_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vcompressps %ymm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8f32_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v8f32(<8 x float> %V, float* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8f64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8f64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v8f64(<8 x double> %V, double* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
-; SKX-LABEL: compressstore_v8i64_v8i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT:    vpmovw2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v8i64_v8i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v8i64(<8 x i64> %V, i64* %base, <8 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
-; SKX-LABEL: compressstore_v4i64_v4i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
-; SKX-NEXT:    vpmovd2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %ymm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v4i64_v4i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $12, %k0, %k0
-; KNL-NEXT:    kshiftrw $12, %k0, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v4i64(<4 x i64> %V, i64* %base, <4 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
-; SKX-LABEL: compressstore_v2i64_v2i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
-; SKX-NEXT:    vpmovq2m %xmm1, %k1
-; SKX-NEXT:    vpcompressq %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v2i64_v2i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpsllq $63, %xmm1, %xmm1
-; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, i64* %base, <2 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) {
-; SKX-LABEL: compressstore_v4f32_v4i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
-; SKX-NEXT:    vpmovd2m %xmm1, %k1
-; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v4f32_v4i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $12, %k0, %k0
-; KNL-NEXT:    kshiftrw $12, %k0, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-    call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask)
-  ret void
-}
-
 define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
 ; SKX-LABEL: expandload_v2f32_v2i1:
 ; SKX:       # %bb.0:
@@ -274,30 +122,6 @@ define <2 x float> @expandload_v2f32_v2i
   ret <2 x float> %res
 }
 
-define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
-; SKX-LABEL: compressstore_v2f32_v2i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
-; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v2f32_v2i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
-; KNL-NEXT:    kshiftlw $14, %k0, %k0
-; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    retq
-  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.compressstore.v2f32(<2 x float> %V, float* %base, <2 x i1> %mask)
-  ret void
-}
-
 define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
 ; ALL-LABEL: expandload_v32f32_v32i32:
 ; ALL:       # %bb.0:
@@ -313,8 +137,8 @@ define <32 x float> @expandload_v32f32_v
   ret <32 x float> %res
 }
 
-define <16 x double> @compressstore_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
-; SKX-LABEL: compressstore_v16f64_v16i32:
+define <16 x double> @expandload_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
+; SKX-LABEL: expandload_v16f64_v16i32:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
 ; SKX-NEXT:    vptestnmd %ymm3, %ymm3, %k1
@@ -325,7 +149,7 @@ define <16 x double> @compressstore_v16f
 ; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
 ; SKX-NEXT:    retq
 ;
-; KNL-LABEL: compressstore_v16f64_v16i32:
+; KNL-LABEL: expandload_v16f64_v16i32:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
 ; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
@@ -341,76 +165,34 @@ define <16 x double> @compressstore_v16f
   ret <16 x double> %res
 }
 
-define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
-; SKX-LABEL: compressstore_v32f32_v32i32:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; SKX-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; SKX-NEXT:    kmovw %k2, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
-; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v32f32_v32i32:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
-; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
-; KNL-NEXT:    kmovw %k2, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
-; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
-; KNL-NEXT:    retq
-  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask)
-  ret void
-}
-
-define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) {
-; SKX-LABEL: compressstore_v16f64_v16i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2
-; SKX-NEXT:    vpmovb2m %xmm2, %k1
-; SKX-NEXT:    kshiftrw $8, %k1, %k2
-; SKX-NEXT:    kmovb %k1, %eax
-; SKX-NEXT:    popcntl %eax, %eax
-; SKX-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
-; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; SKX-NEXT:    vzeroupper
-; SKX-NEXT:    retq
-;
-; KNL-LABEL: compressstore_v16f64_v16i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; KNL-NEXT:    vpslld $31, %zmm2, %zmm2
-; KNL-NEXT:    vptestmd %zmm2, %zmm2, %k1
-; KNL-NEXT:    kshiftrw $8, %k1, %k2
-; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
-; KNL-NEXT:    kmovw %k1, %eax
-; KNL-NEXT:    movzbl %al, %eax
-; KNL-NEXT:    popcntl %eax, %eax
-; KNL-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
-; KNL-NEXT:    retq
-  call void @llvm.masked.compressstore.v16f64(<16 x double> %V, double* %base, <16 x i1> %mask)
-  ret void
-}
+declare <16 x double> @llvm.masked.expandload.v16f64(double*, <16 x i1>, <16 x double>)
+declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
+declare <4 x double> @llvm.masked.expandload.v4f64(double*, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.expandload.v2f64(double*, <2 x i1>, <2 x double>)
+declare <1 x double> @llvm.masked.expandload.v1f64(double*, <1 x i1>, <1 x double>)
 
-declare void @llvm.masked.compressstore.v16f32(<16 x float>, float* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8f32(<8 x float>, float* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8f64(<8 x double>, double* , <8 x i1>)
-declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64* , <8 x i1>)
-declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4f32(<4 x float>, float* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64* , <4 x i1>)
-declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64* , <2 x i1>)
-declare void @llvm.masked.compressstore.v2f32(<2 x float>, float* , <2 x i1>)
-declare void @llvm.masked.compressstore.v32f32(<32 x float>, float* , <32 x i1>)
-declare void @llvm.masked.compressstore.v16f64(<16 x double>, double* , <16 x i1>)
-declare void @llvm.masked.compressstore.v32f64(<32 x double>, double* , <32 x i1>)
-
-declare <2 x float> @llvm.masked.expandload.v2f32(float* , <2 x i1> , <2 x float> )
-declare <32 x float> @llvm.masked.expandload.v32f32(float* , <32 x i1> , <32 x float> )
-declare <16 x double> @llvm.masked.expandload.v16f64(double* , <16 x i1> , <16 x double> )
+declare <32 x float> @llvm.masked.expandload.v32f32(float*, <32 x i1>, <32 x float>)
+declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.expandload.v8f32(float*, <8 x i1>, <8 x float>)
+declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
+declare <2 x float> @llvm.masked.expandload.v2f32(float*, <2 x i1>, <2 x float>)
+
+declare <8 x i64> @llvm.masked.expandload.v8i64(i64*, <8 x i1>, <8 x i64>)
+declare <4 x i64> @llvm.masked.expandload.v4i64(i64*, <4 x i1>, <4 x i64>)
+declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
+declare <1 x i64> @llvm.masked.expandload.v1i64(i64*, <1 x i1>, <1 x i64>)
+
+declare <16 x i32> @llvm.masked.expandload.v16i32(i32*, <16 x i1>, <16 x i32>)
+declare <8 x i32> @llvm.masked.expandload.v8i32(i32*, <8 x i1>, <8 x i32>)
+declare <4 x i32> @llvm.masked.expandload.v4i32(i32*, <4 x i1>, <4 x i32>)
+declare <2 x i32> @llvm.masked.expandload.v2i32(i32*, <2 x i1>, <2 x i32>)
+
+declare <32 x i16> @llvm.masked.expandload.v32i16(i16*, <32 x i1>, <32 x i16>)
+declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>)
+declare <8 x i16> @llvm.masked.expandload.v8i16(i16*, <8 x i1>, <8 x i16>)
+declare <4 x i16> @llvm.masked.expandload.v4i16(i16*, <4 x i1>, <4 x i16>)
+
+declare <64 x i8> @llvm.masked.expandload.v64i8(i8*, <64 x i1>, <64 x i8>)
+declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>)
+declare <16 x i8> @llvm.masked.expandload.v16i8(i8*, <16 x i1>, <16 x i8>)
+declare <8 x i8> @llvm.masked.expandload.v8i8(i8*, <8 x i1>, <8 x i8>)