[llvm] 1c3d770 - [X86][SSE] Add tests for missing BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y)) fold

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 14 09:13:32 PDT 2020


Author: Simon Pilgrim
Date: 2020-06-14T17:10:03+01:00
New Revision: 1c3d7709dec22c61d9c3105e4838edce8e6ac014

URL: https://github.com/llvm/llvm-project/commit/1c3d7709dec22c61d9c3105e4838edce8e6ac014
DIFF: https://github.com/llvm/llvm-project/commit/1c3d7709dec22c61d9c3105e4838edce8e6ac014.diff

LOG: [X86][SSE] Add tests for missing BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y)) fold

This would help reduce XMM->GPR traffic for some reduction cases.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-movmsk.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-movmsk.ll b/llvm/test/CodeGen/X86/combine-movmsk.ll
index b0d4d7e4fbef..df5571aeccf3 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk.ll
@@ -177,3 +177,96 @@ define i1 @pmovmskb_allof_bitcast_v4f32(<4 x float> %a0) {
   %5 = icmp eq i32 %4, 65535
   ret i1 %5
 }
+
+; AND(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(AND(X,Y))
+; XOR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(XOR(X,Y))
+; OR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(OR(X,Y))
+; if the elements are the same width.
+
+define i32 @and_movmskpd_movmskpd(<2 x double> %a0, <2 x i64> %a1) {
+; SSE-LABEL: and_movmskpd_movmskpd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorpd %xmm2, %xmm2
+; SSE-NEXT:    cmpeqpd %xmm0, %xmm2
+; SSE-NEXT:    movmskpd %xmm2, %ecx
+; SSE-NEXT:    movmskpd %xmm1, %eax
+; SSE-NEXT:    andl %ecx, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: and_movmskpd_movmskpd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcmpeqpd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovmskpd %xmm0, %ecx
+; AVX-NEXT:    vmovmskpd %xmm1, %eax
+; AVX-NEXT:    andl %ecx, %eax
+; AVX-NEXT:    retq
+  %1 = fcmp oeq <2 x double> zeroinitializer, %a0
+  %2 = sext <2 x i1> %1 to <2 x i64>
+  %3 = bitcast <2 x i64> %2 to <2 x double>
+  %4 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %3)
+  %5 = icmp sgt <2 x i64> zeroinitializer, %a1
+  %6 = bitcast <2 x i1> %5 to i2
+  %7 = zext i2 %6 to i32
+  %8 = and i32 %4, %7
+  ret i32 %8
+}
+
+define i32 @xor_movmskps_movmskps(<4 x float> %a0, <4 x i32> %a1) {
+; SSE-LABEL: xor_movmskps_movmskps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm2, %xmm2
+; SSE-NEXT:    cmpeqps %xmm0, %xmm2
+; SSE-NEXT:    movmskps %xmm2, %ecx
+; SSE-NEXT:    movmskps %xmm1, %eax
+; SSE-NEXT:    xorl %ecx, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: xor_movmskps_movmskps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcmpeqps %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovmskps %xmm0, %ecx
+; AVX-NEXT:    vmovmskps %xmm1, %eax
+; AVX-NEXT:    xorl %ecx, %eax
+; AVX-NEXT:    retq
+  %1 = fcmp oeq <4 x float> zeroinitializer, %a0
+  %2 = sext <4 x i1> %1 to <4 x i32>
+  %3 = bitcast <4 x i32> %2 to <4 x float>
+  %4 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %3)
+  %5 = ashr <4 x i32> %a1, <i32 31, i32 31, i32 31, i32 31>
+  %6 = bitcast <4 x i32> %5 to <4 x float>
+  %7 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %6)
+  %8 = xor i32 %4, %7
+  ret i32 %8
+}
+
+define i32 @or_pmovmskb_pmovmskb(<16 x i8> %a0, <8 x i16> %a1) {
+; SSE-LABEL: or_pmovmskb_pmovmskb:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpeqb %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %ecx
+; SSE-NEXT:    psraw $15, %xmm1
+; SSE-NEXT:    pmovmskb %xmm1, %eax
+; SSE-NEXT:    orl %ecx, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: or_pmovmskb_pmovmskb:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpmovmskb %xmm0, %ecx
+; AVX-NEXT:    vpsraw $15, %xmm1, %xmm0
+; AVX-NEXT:    vpmovmskb %xmm0, %eax
+; AVX-NEXT:    orl %ecx, %eax
+; AVX-NEXT:    retq
+  %1 = icmp eq <16 x i8> zeroinitializer, %a0
+  %2 = sext <16 x i1> %1 to <16 x i8>
+  %3 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %2)
+  %4 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  %5 = bitcast <8 x i16> %4 to <16 x i8>
+  %6 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %5)
+  %7 = or i32 %3, %6
+  ret i32 %7
+}


        


More information about the llvm-commits mailing list