[llvm] 4e3ba52 - [X86] Add tests showing failure to fold MOVMSK(CMPEQ(AND(X,C1),0)) -> MOVMSK(NOT(SHL(X,C2)))
Author: Simon Pilgrim
Date: 2022-01-30T15:42:59Z
New Revision: 4e3ba526bf48c19a602499acd1ebc4e6535d8f01
URL: https://github.com/llvm/llvm-project/commit/4e3ba526bf48c19a602499acd1ebc4e6535d8f01
DIFF: https://github.com/llvm/llvm-project/commit/4e3ba526bf48c19a602499acd1ebc4e6535d8f01.diff
LOG: [X86] Add tests showing failure to fold MOVMSK(CMPEQ(AND(X,C1),0)) -> MOVMSK(NOT(SHL(X,C2)))
This fold would allow pow2 mask tests to avoid an unnecessary constant-pool load (the LCPI operands in the checks below).
Noticed while investigating how to extend MatchVectorAllZeroTest to support more allof/anyof patterns.
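For illustration only (not part of this commit; the function name is hypothetical): since MOVMSK reads only the sign bits, the fold would shift the tested pow2 bit into the sign position and invert it. For the <4 x i32> case below, C1 = 4 (bit 2), so C2 = 31 - 2 = 29, and the target IR would be roughly:

declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)

define i32 @movmskps_pow2_mask_folded(<4 x i32> %a0) {
  %1 = shl <4 x i32> %a0, <i32 29, i32 29, i32 29, i32 29>  ; move bit 2 into bit 31
  %2 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>   ; NOT: sign bit set iff bit 2 was 0
  %3 = bitcast <4 x i32> %2 to <4 x float>
  %4 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %3)
  ret i32 %4
}

The shift amount is an immediate (PSLLD), and the NOT can be materialized with PCMPEQD/PXOR against an all-ones register, so no constant-pool load is needed.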
Added:
Modified:
llvm/test/CodeGen/X86/combine-movmsk.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-movmsk.ll b/llvm/test/CodeGen/X86/combine-movmsk.ll
index cc01fc1506c56..b91371742de25 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk.ll
@@ -220,6 +220,80 @@ define i1 @pmovmskb_noneof_v16i8_positive(<16 x i8> %a0) {
ret i1 %4
}
+; TODO: MOVMSK(CMPEQ(AND(X,C1),0)) -> MOVMSK(NOT(SHL(X,C2)))
+define i32 @movmskpd_pow2_mask(<2 x i64> %a0) {
+; SSE2-LABEL: movmskpd_pow2_mask:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movmskpd %xmm0, %eax
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: movmskpd_pow2_mask:
+; SSE42: # %bb.0:
+; SSE42-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: pcmpeqq %xmm0, %xmm1
+; SSE42-NEXT: movmskpd %xmm1, %eax
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: movmskpd_pow2_mask:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskpd %xmm0, %eax
+; AVX-NEXT: retq
+ %1 = and <2 x i64> %a0, <i64 -9223372036854775808, i64 -9223372036854775808>
+ %2 = icmp eq <2 x i64> %1, zeroinitializer
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ %4 = bitcast <2 x i64> %3 to <2 x double>
+ %5 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %4)
+ ret i32 %5
+}
+
+define i32 @movmskps_pow2_mask(<4 x i32> %a0) {
+; SSE-LABEL: movmskps_pow2_mask:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: retq
+ %1 = and <4 x i32> %a0, <i32 4, i32 4, i32 4, i32 4>
+ %2 = icmp eq <4 x i32> %1, zeroinitializer
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ %4 = bitcast <4 x i32> %3 to <4 x float>
+ %5 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %4)
+ ret i32 %5
+}
+
+define i32 @pmovmskb_pow2_mask(<16 x i8> %a0) {
+; SSE-LABEL: pmovmskb_pow2_mask:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: pmovmskb_pow2_mask:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: retq
+ %1 = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %2 = icmp eq <16 x i8> %1, zeroinitializer
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ %4 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %3)
+ ret i32 %4
+}
+
; AND(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(AND(X,Y))
; XOR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(XOR(X,Y))
; OR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(OR(X,Y))
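The fold comments in the trailing diff context above describe combines the test file already covers. As an illustration only, the AND case merges two extractions into one: AND(MOVMSK(X),MOVMSK(Y)) produces the same mask as MOVMSK(AND(X,Y)), because the sign bit of X&Y is the AND of the two sign bits. A hypothetical sketch of the combined form:

declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)

define i32 @movmskps_and(<4 x i32> %a0, <4 x i32> %a1) {
  ; Instead of two MOVMSKs plus a scalar AND:
  ; one vector AND plus a single MOVMSK.
  %1 = and <4 x i32> %a0, %a1
  %2 = bitcast <4 x i32> %1 to <4 x float>
  %3 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %2)
  ret i32 %3
}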