[llvm] 410667f - [X86][SSE] Convert PTEST to MOVMSK for allsign bits vector results
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed May 27 03:26:42 PDT 2020
Author: Simon Pilgrim
Date: 2020-05-27T11:06:16+01:00
New Revision: 410667f1b74c614d9382f180d29f5aa1e42cc5c9
URL: https://github.com/llvm/llvm-project/commit/410667f1b74c614d9382f180d29f5aa1e42cc5c9
DIFF: https://github.com/llvm/llvm-project/commit/410667f1b74c614d9382f180d29f5aa1e42cc5c9.diff
LOG: [X86][SSE] Convert PTEST to MOVMSK for allsign bits vector results
If we are using PTEST to check vector elements whose bits are all copies of the sign bit ('allsignbits' values), we can use MOVMSK to extract the sign bits directly and perform the comparison on the scalar value.
For vXi16 cases, where we don't have a MOVMSK for that type, we must mask out every other sign bit of a PMOVMSKB v2Xi8 result, and that mask folds into the TEST comparison.
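As a rough sketch of the scalar equivalence behind the vXi16 case (an SSE4.1 intrinsics sketch for illustration only; the helper names are made up and nothing here is taken from the patch itself):

#include <immintrin.h>

// Before: splat the sign bit of each i16 lane, then PTEST the splats.
static bool any_i16_negative_ptest(__m128i x) {
  __m128i s = _mm_srai_epi16(x, 15); // vpsraw $15 : ashr(x, bw-1)
  return !_mm_testz_si128(s, s);     // vptest     : ZF set iff all bits are zero
}

// After: PMOVMSKB yields one bit per byte, so keep only the bit taken from
// the high byte of each i16 lane; the 0xAAAA mask folds into the TEST.
static bool any_i16_negative_movmsk(__m128i x) {
  return (_mm_movemask_epi8(x) & 0xAAAA) != 0; // vpmovmskb + test $0xAAAA
}

The AVX2 256-bit equivalent works the same way with a v32i8 PMOVMSKB and a 0xAAAAAAAA mask, which is what the EltBits == 16 path in the patch emits.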
If this allows us to remove a vector op (via the SimplifyMultipleUseDemandedBits call), it is consistently faster than a PTEST (https://godbolt.org/z/ziJUst).
I'm investigating whether we ever see regressions without the SimplifyMultipleUseDemandedBits call, even when that means we don't remove a vector op, but that has exposed some other poor codegen issues that will have to wait for a later patch.
Suggested on PR42035 to avoid unnecessary ashr(x,bw-1)/pcmpgt(0,x) sign splat patterns feeding into ptest.
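To make that pattern concrete, here is a hypothetical v4i32 sketch of the sign-splat-into-ptest shape and its MOVMSK replacement (again intrinsics for illustration only, not code from the patch or its tests):

#include <immintrin.h>

// Before: ashr(x, bw-1) (or pcmpgt(0, x)) sign splat feeding PTEST.
static bool any_i32_negative_ptest(__m128i x) {
  __m128i s = _mm_srai_epi32(x, 31); // vpsrad $31
  return !_mm_testz_si128(s, s);     // vptest
}

// After: read the four sign bits directly and compare the scalar with zero.
static bool any_i32_negative_movmsk(__m128i x) {
  return _mm_movemask_ps(_mm_castsi128_ps(x)) != 0; // vmovmskps + testl
}

The v2i64 case uses MOVMSKPD in the same way, as the updated ptestz_v2i64_signbits test below shows.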
Differential Revision: https://reviews.llvm.org/D80563
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/combine-ptest.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e086c65c40cb..d70b5a7f3a22 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40079,7 +40079,8 @@ static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
 /// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
 /// to avoid the inversion.
 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
-                              SelectionDAG &DAG) {
+                              SelectionDAG &DAG,
+                              const X86Subtarget &Subtarget) {
   // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
   if (EFLAGS.getOpcode() != X86ISD::PTEST &&
       EFLAGS.getOpcode() != X86ISD::TESTP)
@@ -40141,6 +40142,9 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
 
   if (Op0 == Op1) {
     SDValue BC = peekThroughBitcasts(Op0);
+    EVT BCVT = BC.getValueType();
+    assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
+           "Unexpected vector type");
 
     // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
     if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
@@ -40156,6 +40160,35 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
                          DAG.getBitcast(OpVT, BC.getOperand(0)),
                          DAG.getBitcast(OpVT, BC.getOperand(1)));
     }
+
+    // If every element is an all-sign value, see if we can use MOVMSK to
+    // more efficiently extract the sign bits and compare that.
+    // TODO: Handle TESTC with comparison inversion.
+    // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
+    // MOVMSK combines to make sure it's never worse than PTEST?
+    unsigned EltBits = BCVT.getScalarSizeInBits();
+    if (DAG.ComputeNumSignBits(BC) == EltBits) {
+      assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
+      APInt SignMask = APInt::getSignMask(EltBits);
+      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+      if (SDValue Res =
+              TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
+        // For vXi16 cases we need to use pmovmskb and extract every other
+        // sign bit.
+        SDLoc DL(EFLAGS);
+        if (EltBits == 16) {
+          MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
+          Res = DAG.getBitcast(MovmskVT, Res);
+          Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
+          Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
+                            DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
+        } else {
+          Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
+        }
+        return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
+                           DAG.getConstant(0, DL, MVT::i32));
+      }
+    }
   }
 
   // TESTZ(-1,X) == TESTZ(X,X)
@@ -40183,7 +40216,7 @@ static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
   if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
     return R;
 
-  if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG))
+  if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
     return R;
 
   return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
diff --git a/llvm/test/CodeGen/X86/combine-ptest.ll b/llvm/test/CodeGen/X86/combine-ptest.ll
index 2928023c7fc2..975440cf8297 100644
--- a/llvm/test/CodeGen/X86/combine-ptest.ll
+++ b/llvm/test/CodeGen/X86/combine-ptest.ll
@@ -299,16 +299,15 @@ start:
}
;
-; TODO: testz(ashr(X,bw-1),-1) -> movmsk(X)
+; testz(ashr(X,bw-1),-1) -> movmsk(X)
;
define i32 @ptestz_v2i64_signbits(<2 x i64> %c, i32 %a, i32 %b) {
; CHECK-LABEL: ptestz_v2i64_signbits:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; CHECK-NEXT: vptest %xmm0, %xmm0
+; CHECK-NEXT: vmovmskpd %xmm0, %ecx
+; CHECK-NEXT: testl %ecx, %ecx
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: retq
%t1 = ashr <2 x i64> %c, <i64 63, i64 63>
@@ -334,8 +333,8 @@ define i32 @ptestz_v8i32_signbits(<8 x i32> %c, i32 %a, i32 %b) {
; AVX2-LABEL: ptestz_v8i32_signbits:
; AVX2: # %bb.0:
; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT: vptest %ymm0, %ymm0
+; AVX2-NEXT: vmovmskps %ymm0, %ecx
+; AVX2-NEXT: testl %ecx, %ecx
; AVX2-NEXT: cmovnel %esi, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -351,8 +350,8 @@ define i32 @ptestz_v8i16_signbits(<8 x i16> %c, i32 %a, i32 %b) {
; CHECK-LABEL: ptestz_v8i16_signbits:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: vpsraw $15, %xmm0, %xmm0
-; CHECK-NEXT: vptest %xmm0, %xmm0
+; CHECK-NEXT: vpmovmskb %xmm0, %ecx
+; CHECK-NEXT: testl $43690, %ecx # imm = 0xAAAA
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: retq
%t1 = ashr <8 x i16> %c, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -380,9 +379,8 @@ define i32 @ptestz_v32i8_signbits(<32 x i8> %c, i32 %a, i32 %b) {
; AVX2-LABEL: ptestz_v32i8_signbits:
; AVX2: # %bb.0:
; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vptest %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %ecx
+; AVX2-NEXT: testl %ecx, %ecx
; AVX2-NEXT: cmovnel %esi, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq