[llvm] 0741b75 - [X86][SSE] Attempt to widen MOVMSK vector input if the signbits are splatted.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Jun 7 03:52:24 PDT 2020
Author: Simon Pilgrim
Date: 2020-06-07T11:44:43+01:00
New Revision: 0741b75ad543d108759c0658fedb5fdfcf064487
URL: https://github.com/llvm/llvm-project/commit/0741b75ad543d108759c0658fedb5fdfcf064487
DIFF: https://github.com/llvm/llvm-project/commit/0741b75ad543d108759c0658fedb5fdfcf064487.diff
LOG: [X86][SSE] Attempt to widen MOVMSK vector input if the signbits are splatted.
As shown on PR37087, if we have a MOVMSK(BITCAST(X)) from a vector with wider elements, then by calling MOVMSK on the wider type (32/64-bit elements) we can improve the chances of further combines with SimplifyDemandedBits/Elts, and on some targets (e.g. Skylake) the wider MOVMSK can be more efficient.
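For context, the guard in the patch, ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits), requires each wide lane's sign bit to be replicated far enough down that it covers the sign bit of every narrower sub-element, e.g. at least 33 sign bits when viewing v2i64 as v4i32, so bit 31 of each lane equals bit 63. Below is a minimal standalone C++ sketch (not part of the patch; the movmskpd/movmskps functions are hypothetical scalar models of the instructions) showing why the zero-test is then preserved:

// Sketch: when every 64-bit lane has more than 32 sign bits, the sign bit
// of each 32-bit sub-element equals the sign bit of its 64-bit lane, so
// MOVMSKPS(x) == 0 exactly when MOVMSKPD(x) == 0.
#include <cassert>
#include <cstdint>

// Scalar model of MOVMSKPD on 2 x i64: one bit per 64-bit lane sign bit.
static unsigned movmskpd(const uint64_t lanes[2]) {
  unsigned mask = 0;
  for (int i = 0; i < 2; ++i)
    mask |= unsigned(lanes[i] >> 63) << i;
  return mask;
}

// Scalar model of MOVMSKPS on the same 128 bits viewed as 4 x i32
// (little-endian lane order, as on x86).
static unsigned movmskps(const uint64_t lanes[2]) {
  unsigned mask = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t elt = uint32_t(lanes[i / 2] >> (32 * (i % 2)));
    mask |= (elt >> 31) << i;
  }
  return mask;
}

int main() {
  // Typical compare results: each lane is all-ones or all-zeros, so every
  // lane trivially has 64 sign bits (> 32), satisfying the combine's check.
  const uint64_t tests[][2] = {
      {0, 0}, {~0ull, 0}, {0, ~0ull}, {~0ull, ~0ull}};
  for (const auto &lanes : tests)
    assert((movmskps(lanes) == 0) == (movmskpd(lanes) == 0));
  return 0;
}

In this case the narrow mask is simply the wide mask with each bit duplicated, so comparing either against zero sets the same EFLAGS, which is all combineSetCCMOVMSK needs.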
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/combine-movmsk-avx.ll
llvm/test/CodeGen/X86/combine-movmsk.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 467c21e0f540..b373e63e7dfb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40260,6 +40260,24 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
"Unexpected MOVMSK operand");
+ // See if we can peek through to a vector with a wider element type, if the
+ // signbits extend down to all the sub-elements as well.
+ // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
+ // potential SimplifyDemandedBits/Elts cases.
+ if (Vec.getOpcode() == ISD::BITCAST) {
+ SDValue BC = peekThroughBitcasts(Vec);
+ unsigned NumEltBits = VecVT.getScalarSizeInBits();
+ unsigned BCNumEltBits = BC.getScalarValueSizeInBits();
+ if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
+ BCNumEltBits > NumEltBits &&
+ DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
+ SDLoc DL(EFLAGS);
+ return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
+ DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
+ DAG.getConstant(0, DL, MVT::i32));
+ }
+ }
+
// See if we can avoid a PACKSS by calling MOVMSK on the sources.
// For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
// sign bits prior to the comparison with zero unless we know that
diff --git a/llvm/test/CodeGen/X86/combine-movmsk-avx.ll b/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
index 5b1d5a3b8ed2..125e5eed6dbd 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
@@ -5,14 +5,14 @@
declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>)
declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>)
-; TODO - Use widest possible vector for movmsk comparisons
+; Use widest possible vector for movmsk comparisons
define i1 @movmskps_bitcast_v4f64(<4 x double> %a0) {
; CHECK-LABEL: movmskps_bitcast_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vmovmskps %ymm0, %eax
+; CHECK-NEXT: vmovmskpd %ymm0, %eax
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: sete %al
; CHECK-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/combine-movmsk.ll b/llvm/test/CodeGen/X86/combine-movmsk.ll
index dff939ae1ef6..c05b03322d31 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk.ll
@@ -8,14 +8,14 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>)
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>)
-; TODO - Use widest possible vector for movmsk comparisons
+; Use widest possible vector for movmsk comparisons
define i1 @movmskps_bitcast_v2f64(<2 x double> %a0) {
; SSE-LABEL: movmskps_bitcast_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: cmpeqpd %xmm0, %xmm1
-; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: movmskpd %xmm1, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
@@ -24,7 +24,7 @@ define i1 @movmskps_bitcast_v2f64(<2 x double> %a0) {
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmpeqpd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: vmovmskpd %xmm0, %eax
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
@@ -36,31 +36,27 @@ define i1 @movmskps_bitcast_v2f64(<2 x double> %a0) {
ret i1 %5
}
-define i1 @movmskps_bitcast_v2i64(<2 x i64> %a0) {
-; SSE2-LABEL: movmskps_bitcast_v2i64:
+define i1 @pmovmskb_bitcast_v2i64(<2 x i64> %a0) {
+; SSE2-LABEL: pmovmskb_bitcast_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
; SSE2-NEXT: sete %al
; SSE2-NEXT: retq
;
-; SSE42-LABEL: movmskps_bitcast_v2i64:
+; SSE42-LABEL: pmovmskb_bitcast_v2i64:
; SSE42: # %bb.0:
-; SSE42-NEXT: pxor %xmm1, %xmm1
-; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
-; SSE42-NEXT: pmovmskb %xmm1, %eax
+; SSE42-NEXT: movmskpd %xmm0, %eax
; SSE42-NEXT: testl %eax, %eax
; SSE42-NEXT: sete %al
; SSE42-NEXT: retq
;
-; AVX-LABEL: movmskps_bitcast_v2i64:
+; AVX-LABEL: pmovmskb_bitcast_v2i64:
; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: vmovmskpd %xmm0, %eax
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
@@ -72,21 +68,21 @@ define i1 @movmskps_bitcast_v2i64(<2 x i64> %a0) {
ret i1 %5
}
-define i1 @movmskps_bitcast_v4f32(<4 x float> %a0) {
-; SSE-LABEL: movmskps_bitcast_v4f32:
+define i1 @pmovmskb_bitcast_v4f32(<4 x float> %a0) {
+; SSE-LABEL: pmovmskb_bitcast_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cmpeqps %xmm0, %xmm1
-; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: movmskps %xmm1, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
-; AVX-LABEL: movmskps_bitcast_v4f32:
+; AVX-LABEL: pmovmskb_bitcast_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sete %al
; AVX-NEXT: retq