[llvm] 320969f - [X86] LowerVectorAllZero - add 512-bit support with AVX512 vptestnmd+kortestw patterns (REAPPLIED)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 23 06:37:21 PDT 2023
Author: Simon Pilgrim
Date: 2023-03-23T13:37:09Z
New Revision: 320969f5058bfffd6517c36771b46ac4a447c7ee
URL: https://github.com/llvm/llvm-project/commit/320969f5058bfffd6517c36771b46ac4a447c7ee
DIFF: https://github.com/llvm/llvm-project/commit/320969f5058bfffd6517c36771b46ac4a447c7ee.diff
LOG: [X86] LowerVectorAllZero - add 512-bit support with AVX512 vptestnmd+kortestw patterns (REAPPLIED)
Another step toward #53419 - this is also a step towards expanding MatchVectorAllZeroTest to match any pair of vectors and merging EmitAVX512Test into it.
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/ptest.ll
llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 74e2a2b6fdc10..2d371566381c8 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -24194,18 +24194,27 @@ static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
// Without PTEST, a masked v2i64 or-reduction is not faster than
// scalarization.
+ bool UseKORTEST = Subtarget.useAVX512Regs();
bool UsePTEST = Subtarget.hasSSE41();
if (!UsePTEST && !Mask.isAllOnes() && VT.getScalarSizeInBits() > 32)
return SDValue();
- // Split down to 128/256-bit vector.
- unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
+ // Split down to 128/256/512-bit vector.
+ unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);
while (VT.getSizeInBits() > TestSize) {
auto Split = DAG.SplitVector(V, DL);
VT = Split.first.getValueType();
V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
}
+ if (UseKORTEST && VT.is512BitVector()) {
+ V = DAG.getBitcast(MVT::v16i32, MaskBits(V));
+ V = DAG.getSetCC(DL, MVT::v16i1, V,
+ getZeroVector(MVT::v16i32, Subtarget, DAG, DL),
+ ISD::SETNE);
+ return DAG.getNode(X86ISD::KORTEST, DL, MVT::i32, V, V);
+ }
+
if (UsePTEST) {
MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
V = DAG.getBitcast(TestVT, MaskBits(V));
diff --git a/llvm/test/CodeGen/X86/ptest.ll b/llvm/test/CodeGen/X86/ptest.ll
index 066cbb6193317..bedcfebc5f6e7 100644
--- a/llvm/test/CodeGen/X86/ptest.ll
+++ b/llvm/test/CodeGen/X86/ptest.ll
@@ -148,9 +148,8 @@ define i32 @veccond512(<16 x i32> %input) {
;
; AVX512-LABEL: veccond512:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: je .LBB2_2
; AVX512-NEXT: # %bb.1: # %if-true-block
; AVX512-NEXT: xorl %eax, %eax
@@ -268,10 +267,9 @@ define i32 @vectest512(<16 x i32> %input) {
;
; AVX512-LABEL: vectest512:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -380,9 +378,8 @@ define i32 @vecsel512(<16 x i32> %input, i32 %a, i32 %b) {
; AVX512-LABEL: vecsel512:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %edi, %eax
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: cmovel %esi, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index fcb0ab6090398..a489a5e6099f0 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -105,9 +105,8 @@ define i1 @test_v8i64(<8 x i64> %a0) {
;
; AVX512-LABEL: test_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -169,9 +168,8 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; AVX512-LABEL: test_v16i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -298,9 +296,8 @@ define i1 @test_v16i32(<16 x i32> %a0) {
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -362,9 +359,8 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; AVX512-LABEL: test_v32i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -510,9 +506,8 @@ define i1 @test_v32i16(<32 x i16> %a0) {
;
; AVX512-LABEL: test_v32i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -574,9 +569,8 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; AVX512-LABEL: test_v64i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -741,9 +735,8 @@ define i1 @test_v64i8(<64 x i8> %a0) {
;
; AVX512-LABEL: test_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -805,9 +798,8 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; AVX512-LABEL: test_v128i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vptest %ymm0, %ymm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -1014,10 +1006,8 @@ define i1 @mask_v128i8(<128 x i8> %a0) {
; AVX512-LABEL: mask_v128i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
-; AVX512-NEXT: vptest %ymm1, %ymm0
+; AVX512-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
More information about the llvm-commits
mailing list