[llvm] [x86] Add lowering for `@llvm.experimental.vector.compress` (PR #104904)
Lawrence Benson via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 20 05:15:54 PDT 2024
https://github.com/lawben updated https://github.com/llvm/llvm-project/pull/104904
From ace996b3f312b768e201b16cdc5f6d80d8e51fe9 Mon Sep 17 00:00:00 2001
From: Lawrence Benson <github at lawben.com>
Date: Thu, 15 Aug 2024 09:09:02 +0200
Subject: [PATCH 1/4] Add basic vector_compress pattern match
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 ++++
llvm/lib/Target/X86/X86InstrAVX512.td | 6 ++++++
2 files changed, 10 insertions(+)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2759252693f9f8..0c046d53a092b7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2321,6 +2321,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
+ if (Subtarget.hasAVX512()) {
+ setOperationAction(ISD::VECTOR_COMPRESS, MVT::v4i32, Legal);
+ }
+
if (!Subtarget.useSoftFloat() &&
(Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index e616a8a37c6487..487fa12ac17fad 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -10543,6 +10543,12 @@ multiclass compress_by_vec_width_lowering<X86VectorVTInfo _, string Name> {
def : Pat<(X86compress (_.VT _.RC:$src), _.ImmAllZerosV, _.KRCWM:$mask),
(!cast<Instruction>(Name#_.ZSuffix#rrkz)
_.KRCWM:$mask, _.RC:$src)>;
+ def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, undef)),
+ (!cast<Instruction>(Name#_.ZSuffix#rrkz)
+ _.KRCWM:$mask, _.RC:$src)>;
+ def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)),
+ (!cast<Instruction>(Name#_.ZSuffix#rrk)
+ _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)>;
}
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
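For context, `@llvm.experimental.vector.compress` packs the lanes of %vec selected by %mask to the front of the result; the remaining lanes keep the corresponding lanes of %passthru (and are undefined when %passthru is undef, which is what the zeroing rrkz pattern matches). A scalar C++ model of that semantics, purely illustrative and not part of the patch:

  #include <array>
  #include <cstddef>

  // Scalar model of llvm.experimental.vector.compress: selected lanes are
  // packed towards lane 0, unselected tail positions keep the passthru lanes.
  template <typename T, std::size_t N>
  std::array<T, N> compressModel(const std::array<T, N> &Vec,
                                 const std::array<bool, N> &Mask,
                                 const std::array<T, N> &Passthru) {
    std::array<T, N> Out = Passthru;
    std::size_t Idx = 0;
    for (std::size_t I = 0; I < N; ++I)
      if (Mask[I])
        Out[Idx++] = Vec[I];
    return Out;
  }

This is the same operation AVX512's VPCOMPRESS* instructions perform in one masked step, which is why the two patterns above can map the generic node directly onto the existing X86compress instruction definitions.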
From 589e89646266abb59e730bff281a706f804971c3 Mon Sep 17 00:00:00 2001
From: Lawrence Benson <github at lawben.com>
Date: Tue, 20 Aug 2024 10:05:12 +0200
Subject: [PATCH 2/4] Add VBMI2 handling
---
.../CodeGen/SelectionDAG/TargetLowering.cpp | 9 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 10 +-
llvm/lib/Target/X86/X86InstrAVX512.td | 2 +-
llvm/test/CodeGen/X86/vector-compress-avx2.ll | 281 +++++++++++
.../CodeGen/X86/vector-compress-avx512.ll | 455 ++++++++++++++++++
5 files changed, 752 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/vector-compress-avx2.ll
create mode 100644 llvm/test/CodeGen/X86/vector-compress-avx512.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index c4f4261a708fda..cebbc80974d6fa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -11582,11 +11582,12 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
// ... if it is not a splat vector, we need to get the passthru value at
// position = popcount(mask) and re-load it from the stack before it is
// overwritten in the loop below.
+ EVT PopcountVT = ScalarVT.changeTypeToInteger();
SDValue Popcount = DAG.getNode(
ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask);
Popcount = DAG.getNode(ISD::ZERO_EXTEND, DL,
- MaskVT.changeVectorElementType(ScalarVT), Popcount);
- Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, ScalarVT, Popcount);
+ MaskVT.changeVectorElementType(PopcountVT), Popcount);
+ Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount);
SDValue LastElmtPtr =
getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
LastWriteVal = DAG.getLoad(
@@ -11625,8 +11626,10 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
// Re-write the last ValI if all lanes were selected. Otherwise,
// overwrite the last write with the passthru value.
+ SDNodeFlags Flags{};
+ Flags.setUnpredictable(true);
LastWriteVal =
- DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal);
+ DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags);
Chain = DAG.getStore(
Chain, DL, LastWriteVal, OutPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0c046d53a092b7..fa7c7b2789b7dc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2322,7 +2322,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
if (Subtarget.hasAVX512()) {
- setOperationAction(ISD::VECTOR_COMPRESS, MVT::v4i32, Legal);
+ for (MVT VT : {MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64, MVT::v8i32,
+ MVT::v8f32, MVT::v4i64, MVT::v4f64, MVT::v16i32, MVT::v16f32,
+ MVT::v8i64, MVT::v8f64})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
+
+ if (Subtarget.hasVBMI2())
+ for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16,
+ MVT::v64i8, MVT::v32i16})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
}
if (!Subtarget.useSoftFloat() &&
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 487fa12ac17fad..d9f9432a10114d 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -10548,7 +10548,7 @@ multiclass compress_by_vec_width_lowering<X86VectorVTInfo _, string Name> {
_.KRCWM:$mask, _.RC:$src)>;
def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)),
(!cast<Instruction>(Name#_.ZSuffix#rrk)
- _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)>;
+ _.RC:$passthru, _.KRCWM:$mask, _.RC:$src)>;
}
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
diff --git a/llvm/test/CodeGen/X86/vector-compress-avx2.ll b/llvm/test/CodeGen/X86/vector-compress-avx2.ll
new file mode 100644
index 00000000000000..59db0fb7953862
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-compress-avx2.ll
@@ -0,0 +1,281 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64 -mattr=avx2 < %s | FileCheck %s
+
+; The main logic for vpcompress is tested in the -avx512.ll version of this file.
+; This file only checks the fallback expand path.
+
+define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) {
+; CHECK-LABEL: test_compress_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
+; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vpextrd $1, %xmm1, %eax
+; CHECK-NEXT: vmovd %xmm1, %esi
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: movl %esi, %edi
+; CHECK-NEXT: subl %eax, %edi
+; CHECK-NEXT: vpextrd $2, %xmm1, %edx
+; CHECK-NEXT: subl %edx, %edi
+; CHECK-NEXT: vpextrd $3, %xmm1, %ecx
+; CHECK-NEXT: subl %ecx, %edi
+; CHECK-NEXT: andl $3, %edi
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: addq %rsi, %rax
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: addq %rax, %rdx
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: addq %rdx, %rcx
+; CHECK-NEXT: vextractps $3, %xmm0, %r8d
+; CHECK-NEXT: cmpq $4, %rcx
+; CHECK-NEXT: cmovbl -24(%rsp,%rdi,4), %r8d
+; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4)
+; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rax,4)
+; CHECK-NEXT: andl $3, %edx
+; CHECK-NEXT: vextractps $3, %xmm0, -24(%rsp,%rdx,4)
+; CHECK-NEXT: cmpq $3, %rcx
+; CHECK-NEXT: movl $3, %eax
+; CHECK-NEXT: cmovbq %rcx, %rax
+; CHECK-NEXT: movl %r8d, -24(%rsp,%rax,4)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru)
+ ret <4 x i32> %out
+}
+
+define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) {
+; CHECK-LABEL: test_compress_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
+; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vpextrd $1, %xmm1, %edx
+; CHECK-NEXT: vmovd %xmm1, %esi
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: movl %esi, %edi
+; CHECK-NEXT: subl %edx, %edi
+; CHECK-NEXT: vpextrd $2, %xmm1, %ecx
+; CHECK-NEXT: subl %ecx, %edi
+; CHECK-NEXT: vpextrd $3, %xmm1, %eax
+; CHECK-NEXT: subl %eax, %edi
+; CHECK-NEXT: andl $3, %edi
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4)
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: addq %rsi, %rdx
+; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rdx,4)
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: addq %rdx, %rcx
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
+; CHECK-NEXT: andl $3, %ecx
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4)
+; CHECK-NEXT: cmpq $3, %rax
+; CHECK-NEXT: movl $3, %ecx
+; CHECK-NEXT: cmovbq %rax, %rcx
+; CHECK-NEXT: ja .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru)
+ ret <4 x float> %out
+}
+
+define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) {
+; CHECK-LABEL: test_compress_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vpextrq $1, %xmm1, %rax
+; CHECK-NEXT: vmovq %xmm1, %rcx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: subl %eax, %edx
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: vpextrq $1, %xmm0, %rsi
+; CHECK-NEXT: cmpq $2, %rax
+; CHECK-NEXT: cmovbq -24(%rsp,%rdx,8), %rsi
+; CHECK-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ecx, %ecx
+; CHECK-NEXT: vpextrq $1, %xmm0, -24(%rsp,%rcx,8)
+; CHECK-NEXT: cmpq $1, %rax
+; CHECK-NEXT: movl $1, %ecx
+; CHECK-NEXT: cmovbq %rax, %rcx
+; CHECK-NEXT: movq %rsi, -24(%rsp,%rcx,8)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+ %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru)
+ ret <2 x i64> %out
+}
+
+define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) {
+; CHECK-LABEL: test_compress_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vpextrq $1, %xmm1, %rax
+; CHECK-NEXT: vmovq %xmm1, %rcx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: subl %eax, %edx
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: vmovlpd %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: vmovhpd %xmm0, -24(%rsp,%rdx,8)
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: cmpq $2, %rax
+; CHECK-NEXT: jb .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: cmpq $1, %rax
+; CHECK-NEXT: movl $1, %ecx
+; CHECK-NEXT: cmovbq %rax, %rcx
+; CHECK-NEXT: vmovsd %xmm1, -24(%rsp,%rcx,8)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+ %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru)
+ ret <2 x double> %out
+}
+
+define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) {
+; CHECK-LABEL: test_compress_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: andq $-32, %rsp
+; CHECK-NEXT: subq $64, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -24
+; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-NEXT: vpslld $31, %ymm1, %ymm1
+; CHECK-NEXT: vpsrad $31, %ymm1, %ymm3
+; CHECK-NEXT: vmovaps %ymm2, (%rsp)
+; CHECK-NEXT: vextracti128 $1, %ymm3, %xmm1
+; CHECK-NEXT: vpackssdw %xmm1, %xmm3, %xmm2
+; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; CHECK-NEXT: vpslld $31, %ymm2, %ymm2
+; CHECK-NEXT: vpsrld $31, %ymm2, %ymm2
+; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm4
+; CHECK-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; CHECK-NEXT: vpextrd $1, %xmm2, %eax
+; CHECK-NEXT: vmovd %xmm2, %ecx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: vpextrd $2, %xmm2, %edx
+; CHECK-NEXT: vpextrd $3, %xmm2, %eax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: andl $7, %eax
+; CHECK-NEXT: vpextrd $1, %xmm3, %ecx
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: vmovd %xmm3, %edx
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: addq %rdx, %rcx
+; CHECK-NEXT: vpextrd $2, %xmm3, %esi
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: addq %rcx, %rsi
+; CHECK-NEXT: vpextrd $3, %xmm3, %edi
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: addq %rsi, %rdi
+; CHECK-NEXT: vmovd %xmm1, %r8d
+; CHECK-NEXT: andl $1, %r8d
+; CHECK-NEXT: addq %rdi, %r8
+; CHECK-NEXT: vpextrd $1, %xmm1, %r9d
+; CHECK-NEXT: andl $1, %r9d
+; CHECK-NEXT: addq %r8, %r9
+; CHECK-NEXT: vpextrd $2, %xmm1, %r10d
+; CHECK-NEXT: andl $1, %r10d
+; CHECK-NEXT: addq %r9, %r10
+; CHECK-NEXT: vpextrd $3, %xmm1, %r11d
+; CHECK-NEXT: andl $1, %r11d
+; CHECK-NEXT: addq %r10, %r11
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vextractps $3, %xmm1, %ebx
+; CHECK-NEXT: cmpq $8, %r11
+; CHECK-NEXT: cmovbl (%rsp,%rax,4), %ebx
+; CHECK-NEXT: vmovss %xmm0, (%rsp)
+; CHECK-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4)
+; CHECK-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4)
+; CHECK-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4)
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: vmovss %xmm1, (%rsp,%rdi,4)
+; CHECK-NEXT: andl $7, %r8d
+; CHECK-NEXT: vextractps $1, %xmm1, (%rsp,%r8,4)
+; CHECK-NEXT: andl $7, %r9d
+; CHECK-NEXT: vextractps $2, %xmm1, (%rsp,%r9,4)
+; CHECK-NEXT: andl $7, %r10d
+; CHECK-NEXT: vextractps $3, %xmm1, (%rsp,%r10,4)
+; CHECK-NEXT: cmpq $7, %r11
+; CHECK-NEXT: movl $7, %eax
+; CHECK-NEXT: cmovbq %r11, %rax
+; CHECK-NEXT: movl %eax, %eax
+; CHECK-NEXT: movl %ebx, (%rsp,%rax,4)
+; CHECK-NEXT: vmovaps (%rsp), %ymm0
+; CHECK-NEXT: leaq -8(%rbp), %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+ %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru)
+ ret <8 x i32> %out
+}
+
+define <4 x i32> @test_compress_all_const() {
+; CHECK-LABEL: test_compress_all_const:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> <i32 3, i32 5, i32 7, i32 9>,
+ <4 x i1> <i1 0, i1 1, i1 0, i1 1>,
+ <4 x i32> undef)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> undef)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) {
+; CHECK-LABEL: test_compress_const_mask_passthrough:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> %passthru)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_mask_const_passthrough:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; CHECK-NEXT: movl $7, %eax
+; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; CHECK-NEXT: movl $8, %eax
+; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x i32> <i32 5, i32 6, i32 7, i32 8>)
+ ret <4 x i32> %out
+}
diff --git a/llvm/test/CodeGen/X86/vector-compress-avx512.ll b/llvm/test/CodeGen/X86/vector-compress-avx512.ll
new file mode 100644
index 00000000000000..670d5bc12aabd8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-compress-avx512.ll
@@ -0,0 +1,455 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s
+
+define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) {
+; CHECK-LABEL: test_compress_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vpcompressd %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru)
+ ret <4 x i32> %out
+}
+
+define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) {
+; CHECK-LABEL: test_compress_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vcompressps %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru)
+ ret <4 x float> %out
+}
+
+define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) {
+; CHECK-LABEL: test_compress_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
+; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1
+; CHECK-NEXT: vpcompressq %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru)
+ ret <2 x i64> %out
+}
+
+define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) {
+; CHECK-LABEL: test_compress_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
+; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1
+; CHECK-NEXT: vcompresspd %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru)
+ ret <2 x double> %out
+}
+
+define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) {
+; CHECK-LABEL: test_compress_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
+; CHECK-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-NEXT: vpcompressd %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru)
+ ret <8 x i32> %out
+}
+
+define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) {
+; CHECK-LABEL: test_compress_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
+; CHECK-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-NEXT: vcompressps %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <8 x float> @llvm.experimental.vector.compress(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru)
+ ret <8 x float> %out
+}
+
+define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) {
+; CHECK-LABEL: test_compress_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vpcompressq %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <4 x i64> @llvm.experimental.vector.compress(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru)
+ ret <4 x i64> %out
+}
+
+define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) {
+; CHECK-LABEL: test_compress_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vcompresspd %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru)
+ ret <4 x double> %out
+}
+
+define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) {
+; CHECK-LABEL: test_compress_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1
+; CHECK-NEXT: vpmovb2m %xmm1, %k1
+; CHECK-NEXT: vpcompressd %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru)
+ ret <16 x i32> %out
+}
+
+define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) {
+; CHECK-LABEL: test_compress_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1
+; CHECK-NEXT: vpmovb2m %xmm1, %k1
+; CHECK-NEXT: vcompressps %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <16 x float> @llvm.experimental.vector.compress(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru)
+ ret <16 x float> %out
+}
+
+define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) {
+; CHECK-LABEL: test_compress_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
+; CHECK-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-NEXT: vpcompressq %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <8 x i64> @llvm.experimental.vector.compress(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru)
+ ret <8 x i64> %out
+}
+
+define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) {
+; CHECK-LABEL: test_compress_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
+; CHECK-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-NEXT: vcompresspd %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <8 x double> @llvm.experimental.vector.compress(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru)
+ ret <8 x double> %out
+}
+
+define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) {
+; CHECK-LABEL: test_compress_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1
+; CHECK-NEXT: vpmovb2m %xmm1, %k1
+; CHECK-NEXT: vpcompressb %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru)
+ ret <16 x i8> %out
+}
+
+define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) {
+; CHECK-LABEL: test_compress_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
+; CHECK-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-NEXT: vpcompressw %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <8 x i16> @llvm.experimental.vector.compress(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru)
+ ret <8 x i16> %out
+}
+
+define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) {
+; CHECK-LABEL: test_compress_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1
+; CHECK-NEXT: vpmovb2m %ymm1, %k1
+; CHECK-NEXT: vpcompressb %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <32 x i8> @llvm.experimental.vector.compress(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru)
+ ret <32 x i8> %out
+}
+
+define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) {
+; CHECK-LABEL: test_compress_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1
+; CHECK-NEXT: vpmovb2m %xmm1, %k1
+; CHECK-NEXT: vpcompressw %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: retq
+ %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru)
+ ret <16 x i16> %out
+}
+
+define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) {
+; CHECK-LABEL: test_compress_v64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %zmm1, %zmm1
+; CHECK-NEXT: vpmovb2m %zmm1, %k1
+; CHECK-NEXT: vpcompressb %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru)
+ ret <64 x i8> %out
+}
+
+define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) {
+; CHECK-LABEL: test_compress_v32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1
+; CHECK-NEXT: vpmovb2m %ymm1, %k1
+; CHECK-NEXT: vpcompressw %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru)
+ ret <32 x i16> %out
+}
+
+define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) {
+; CHECK-LABEL: test_compress_large:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: andq $-64, %rsp
+; CHECK-NEXT: subq $576, %rsp # imm = 0x240
+; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k1
+; CHECK-NEXT: kshiftrq $32, %k1, %k4
+; CHECK-NEXT: kshiftrd $16, %k4, %k3
+; CHECK-NEXT: kshiftrd $16, %k1, %k2
+; CHECK-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp)
+; CHECK-NEXT: kshiftrw $8, %k1, %k0
+; CHECK-NEXT: kxorw %k0, %k1, %k0
+; CHECK-NEXT: kshiftrw $4, %k0, %k5
+; CHECK-NEXT: kxorw %k5, %k0, %k0
+; CHECK-NEXT: kshiftrw $2, %k0, %k5
+; CHECK-NEXT: kxorw %k5, %k0, %k0
+; CHECK-NEXT: kshiftrw $1, %k0, %k5
+; CHECK-NEXT: kxorw %k5, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: andl $31, %eax
+; CHECK-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z}
+; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4)
+; CHECK-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z}
+; CHECK-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: kshiftrw $8, %k4, %k0
+; CHECK-NEXT: kxorw %k0, %k4, %k0
+; CHECK-NEXT: kshiftrw $4, %k0, %k4
+; CHECK-NEXT: kxorw %k4, %k0, %k0
+; CHECK-NEXT: kshiftrw $2, %k0, %k4
+; CHECK-NEXT: kxorw %k4, %k0, %k0
+; CHECK-NEXT: kshiftrw $1, %k0, %k4
+; CHECK-NEXT: kxorw %k4, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: andl $31, %eax
+; CHECK-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z}
+; CHECK-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4)
+; CHECK-NEXT: vmovaps (%rsp), %zmm0
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1
+; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: kxorw %k2, %k1, %k0
+; CHECK-NEXT: kshiftrw $8, %k0, %k1
+; CHECK-NEXT: kxorw %k1, %k0, %k0
+; CHECK-NEXT: kshiftrw $4, %k0, %k1
+; CHECK-NEXT: kxorw %k1, %k0, %k0
+; CHECK-NEXT: kshiftrw $2, %k0, %k1
+; CHECK-NEXT: kxorw %k1, %k0, %k0
+; CHECK-NEXT: kshiftrw $1, %k0, %k1
+; CHECK-NEXT: kxorw %k1, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: andl $63, %eax
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2
+; CHECK-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4)
+; CHECK-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4)
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3
+; CHECK-NEXT: movq %rbp, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+ %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef)
+ ret <64 x i32> %out
+}
+
+define <4 x i32> @test_compress_all_const() {
+; CHECK-LABEL: test_compress_all_const:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> <i32 3, i32 5, i32 7, i32 9>,
+ <4 x i1> <i1 0, i1 1, i1 0, i1 1>,
+ <4 x i32> undef)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> undef)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) {
+; CHECK-LABEL: test_compress_const_mask_passthrough:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3]
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> %passthru)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_mask_const_passthrough:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; CHECK-NEXT: movl $7, %eax
+; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; CHECK-NEXT: movl $8, %eax
+; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x i32> <i32 5, i32 6, i32 7, i32 8>)
+ ret <4 x i32> %out
+}
+
+; We pass a placeholder value in %ignore for the tests below to check that a constant (splat or undef) mask
+; folds the compress to a no-op: a plain copy of the second input register to the return register, or no code at all.
+define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_splat1_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 -1), <4 x i32> undef)
+ ret <4 x i32> %out
+}
+define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_splat0_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef)
+ ret <4 x i32> %out
+}
+define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) {
+; CHECK-LABEL: test_compress_undef_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> undef, <4 x i32> undef)
+ ret <4 x i32> %out
+}
+define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) {
+; CHECK-LABEL: test_compress_const_splat0_mask_with_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> %passthru)
+ ret <4 x i32> %out
+}
+define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) {
+; CHECK-LABEL: test_compress_const_splat0_mask_without_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef)
+ ret <4 x i32> %out
+}
+
+define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) {
+; CHECK-LABEL: test_compress_small:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %out = call <4 x i8> @llvm.experimental.vector.compress(<4 x i8> %vec, <4 x i1> %mask, <4 x i8> undef)
+ ret <4 x i8> %out
+}
+
+define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) {
+; CHECK-LABEL: test_compress_illegal_element_type:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
+; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1
+; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %out = call <4 x i4> @llvm.experimental.vector.compress(<4 x i4> %vec, <4 x i1> %mask, <4 x i4> undef)
+ ret <4 x i4> %out
+}
+
+define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) {
+; CHECK-LABEL: test_compress_narrow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: kshiftlw $15, %k1, %k1
+; CHECK-NEXT: kshiftrw $14, %k1, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: movw $-5, %ax
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: kandw %k1, %k0, %k0
+; CHECK-NEXT: kmovd %edx, %k1
+; CHECK-NEXT: kshiftlw $15, %k1, %k1
+; CHECK-NEXT: kshiftrw $13, %k1, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: movb $7, %al
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: kandw %k1, %k0, %k1
+; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %out = call <3 x i32> @llvm.experimental.vector.compress(<3 x i32> %vec, <3 x i1> %mask, <3 x i32> undef)
+ ret <3 x i32> %out
+}
+
+define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) {
+; CHECK-LABEL: test_compress_narrow_illegal_element_type:
+; CHECK: # %bb.0:
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: kmovw %ecx, %k0
+; CHECK-NEXT: kmovd %r8d, %k1
+; CHECK-NEXT: kshiftlw $15, %k1, %k1
+; CHECK-NEXT: kshiftrw $14, %k1, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: movw $-5, %ax
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: kandw %k1, %k0, %k0
+; CHECK-NEXT: kmovd %r9d, %k1
+; CHECK-NEXT: kshiftlw $15, %k1, %k1
+; CHECK-NEXT: kshiftrw $13, %k1, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: movb $7, %al
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: kandw %k1, %k0, %k1
+; CHECK-NEXT: vmovd %edi, %xmm0
+; CHECK-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: vpextrb $4, %xmm0, %edx
+; CHECK-NEXT: vpextrb $8, %xmm0, %ecx
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: # kill: def $dl killed $dl killed $edx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT: retq
+ %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef)
+ ret <3 x i3> %out
+}
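Besides the X86 hooks, this patch also touches the generic expand path that targets without native support fall back to (the AVX2 RUN line above exercises it). The fix matters because the popcount of the mask is computed by zero-extending the i1 lanes and summing them with VECREDUCE_ADD, which requires an integer element type, while ScalarVT can be f32/f64; the reduction now runs in ScalarVT.changeTypeToInteger(). A rough scalar model of what expandVECTOR_COMPRESS computes for the non-splat-passthru case (a sketch; the real code works branchlessly through a stack slot):

  #include <array>
  #include <cstddef>

  // Rough model of TargetLowering::expandVECTOR_COMPRESS.
  template <typename T, std::size_t N>
  std::array<T, N> expandCompressModel(const std::array<T, N> &Vec,
                                       const std::array<bool, N> &Mask,
                                       const std::array<T, N> &Passthru) {
    std::array<T, N> Slot = Passthru;       // spill passthru to the stack slot
    std::size_t Pos = 0;                    // running output position
    for (std::size_t I = 0; I < N; ++I) {
      Slot[Pos < N ? Pos : N - 1] = Vec[I]; // every lane is stored unconditionally
      Pos += Mask[I] ? 1 : 0;               // only selected lanes advance Pos
    }
    // Pos now equals popcount(Mask). That slot was clobbered by the trailing
    // unconditional stores, so it is restored from the passthru unless all
    // lanes were selected; in the DAG this is the final guarded store, and the
    // popcount above is the index whose type this patch corrects.
    if (Pos < N)
      Slot[Pos] = Passthru[Pos];
    return Slot;
  }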
From 087e5111f2c3d902e0ecbc8a9a8ec84678e23098 Mon Sep 17 00:00:00 2001
From: Lawrence Benson <github at lawben.com>
Date: Tue, 20 Aug 2024 13:38:24 +0200
Subject: [PATCH 3/4] Fix formatting
---
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index cebbc80974d6fa..8252c0338252c6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -11585,8 +11585,9 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
EVT PopcountVT = ScalarVT.changeTypeToInteger();
SDValue Popcount = DAG.getNode(
ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask);
- Popcount = DAG.getNode(ISD::ZERO_EXTEND, DL,
- MaskVT.changeVectorElementType(PopcountVT), Popcount);
+ Popcount =
+ DAG.getNode(ISD::ZERO_EXTEND, DL,
+ MaskVT.changeVectorElementType(PopcountVT), Popcount);
Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount);
SDValue LastElmtPtr =
getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
@@ -11628,8 +11629,8 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
// overwrite the last write with the passthru value.
SDNodeFlags Flags{};
Flags.setUnpredictable(true);
- LastWriteVal =
- DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags);
+ LastWriteVal = DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI,
+ LastWriteVal, Flags);
Chain = DAG.getStore(
Chain, DL, LastWriteVal, OutPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
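The second hunk only reflows the select that patch 2 tagged with the Unpredictable flag, but the flag itself is worth a note: whether all mask lanes were set is data-dependent and effectively random, so lowering this select as a conditional branch would mispredict often. Marking it unpredictable steers the backend towards a branchless cmov. Reduced to its essentials (a fragment, with operand names as in the surrounding code, not standalone):

  SDNodeFlags Flags;
  Flags.setUnpredictable(true); // condition depends on runtime mask contents,
                                // so prefer cmov over a hard-to-predict branch
  LastWriteVal =
      DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags);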
From ef0beacce31f2b5e550ac10ec74653393dd5d568 Mon Sep 17 00:00:00 2001
From: Lawrence Benson <github at lawben.com>
Date: Tue, 20 Aug 2024 14:15:25 +0200
Subject: [PATCH 4/4] Add VLX check
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fa7c7b2789b7dc..fcdf12c034d524 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2321,15 +2321,26 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
+ // vpcompress depends on various AVX512 extensions.
if (Subtarget.hasAVX512()) {
- for (MVT VT : {MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64, MVT::v8i32,
- MVT::v8f32, MVT::v4i64, MVT::v4f64, MVT::v16i32, MVT::v16f32,
- MVT::v8i64, MVT::v8f64})
+ // Legal in AVX512F
+ for (MVT VT : {MVT::v16i32, MVT::v16f32, MVT::v8i64, MVT::v8f64})
setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
+ // Legal in AVX512F + AVX512VL
+ if (Subtarget.hasVLX())
+ for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v4i32, MVT::v4f32, MVT::v4i64,
+ MVT::v4f64, MVT::v2i64, MVT::v2f64})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
+
+ // Legal in AVX512F + AVX512VBMI2
if (Subtarget.hasVBMI2())
- for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16,
- MVT::v64i8, MVT::v32i16})
+ for (MVT VT : {MVT::v32i16, MVT::v64i8})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
+
+ // Legal in AVX512F + AVX512VL + AVX512VBMI2
+ if (Subtarget.hasVBMI2() && Subtarget.hasVLX())
+ for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16})
setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
}
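The resulting legality matrix: the 512-bit dword/qword forms need only AVX512F, the 128/256-bit forms additionally need AVX512VL, and the byte/word element types need AVX512VBMI2 (plus VLX for the sub-512-bit widths). Every other type falls back to the generic expansion tested in the avx2 file. A hedged sketch of how this could be queried through the existing TargetLowering API (fragment; assumes a configured x86 TargetLowering instance TLI):

  // Legal => selects a single vpcompress*; otherwise expandVECTOR_COMPRESS.
  bool Zmm  = TLI.isOperationLegal(ISD::VECTOR_COMPRESS, MVT::v16i32); // AVX512F
  bool Xmm  = TLI.isOperationLegal(ISD::VECTOR_COMPRESS, MVT::v4i32);  // + AVX512VL
  bool Byte = TLI.isOperationLegal(ISD::VECTOR_COMPRESS, MVT::v64i8);  // + AVX512VBMI2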