[llvm] 06df1a2 - [X86] vector-reduce-add-sext - add sum of (sign-extended) bool vector tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Feb 13 11:40:12 PST 2022
Author: Simon Pilgrim
Date: 2022-02-13T19:40:07Z
New Revision: 06df1a2eff841d20b1811b5acca7a200aa08b0fb
URL: https://github.com/llvm/llvm-project/commit/06df1a2eff841d20b1811b5acca7a200aa08b0fb
DIFF: https://github.com/llvm/llvm-project/commit/06df1a2eff841d20b1811b5acca7a200aa08b0fb.diff
LOG: [X86] vector-reduce-add-sext - add sum of (sign-extended) bool vector tests
Added:
Modified:
llvm/test/CodeGen/X86/vector-reduce-add-sext.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-reduce-add-sext.ll b/llvm/test/CodeGen/X86/vector-reduce-add-sext.ll
index 7711ec35ba22..5e001758eea4 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-add-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-add-sext.ll
@@ -1483,6 +1483,571 @@ define i16 @test_v64i16_v64i8(<64 x i8> %a0) {
}
+;
+; vXi1 - sum of extended bool vectors
+;
+
+define i64 @test_v2i64_v2i1(<2 x i64> %a0) {
+; SSE2-LABEL: test_v2i64_v2i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v2i64_v2i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE41-NEXT: paddq %xmm1, %xmm0
+; SSE41-NEXT: movq %xmm0, %rax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+ %1 = icmp slt <2 x i64> %a0, zeroinitializer
+ %2 = sext <2 x i1> %1 to <2 x i64>
+ %3 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %2)
+ ret i64 %3
+}
+
+define i32 @test_v4i32_v4i1(<4 x i32> %a0) {
+; SSE2-LABEL: test_v4i32_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v4i32_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE41-NEXT: paddd %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
+; SSE41-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test_v4i32_v4i1:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v4i32_v4i1:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vmovd %xmm0, %eax
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v4i32_v4i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v4i32_v4i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <4 x i32> %a0, zeroinitializer
+ %2 = sext <4 x i1> %1 to <4 x i32>
+ %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
+ ret i32 %3
+}
+
+define i16 @test_v8i16_v8i1(<8 x i16> %a0) {
+; SSE2-LABEL: test_v8i16_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtw %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v8i16_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpgtw %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test_v8i16_v8i1:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v8i16_v8i1:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vmovd %xmm0, %eax
+; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v8i16_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v8i16_v8i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i16> %a0, zeroinitializer
+ %2 = sext <8 x i1> %1 to <8 x i16>
+ %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
+ ret i16 %3
+}
+
+define i8 @test_v16i8_v16i1(<16 x i8> %a0) {
+; SSE2-LABEL: test_v16i8_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: paddb %xmm2, %xmm0
+; SSE2-NEXT: psadbw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v16i8_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE41-NEXT: paddb %xmm2, %xmm0
+; SSE41-NEXT: psadbw %xmm1, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+ %1 = icmp slt <16 x i8> %a0, zeroinitializer
+ %2 = sext <16 x i1> %1 to <16 x i8>
+ %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2)
+ ret i8 %3
+}
+
+define i8 @test_v32i8_v32i1(<32 x i8> %a0) {
+; SSE2-LABEL: test_v32i8_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: psadbw %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v32i8_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpgtb %xmm0, %xmm1
+; SSE41-NEXT: paddb %xmm3, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: psadbw %xmm2, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v32i8_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <32 x i8> %a0, zeroinitializer
+ %2 = sext <32 x i1> %1 to <32 x i8>
+ %3 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %2)
+ ret i8 %3
+}
+
+define i8 @test_v64i8_v64i1(<64 x i8> %a0) {
+; SSE2-LABEL: test_v64i8_v64i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: paddb %xmm0, %xmm3
+; SSE2-NEXT: paddb %xmm5, %xmm3
+; SSE2-NEXT: paddb %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: paddb %xmm3, %xmm0
+; SSE2-NEXT: psadbw %xmm4, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v64i8_v64i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pcmpgtb %xmm2, %xmm5
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pcmpgtb %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE41-NEXT: paddb %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm5, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE41-NEXT: paddb %xmm3, %xmm0
+; SSE41-NEXT: psadbw %xmm4, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <64 x i8> %a0, zeroinitializer
+ %2 = sext <64 x i1> %1 to <64 x i8>
+ %3 = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %2)
+ ret i8 %3
+}
+
+define i8 @test_v128i8_v128i1(<128 x i8> %a0) {
+; SSE2-LABEL: test_v128i8_v128i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtb %xmm4, %xmm9
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpgtb %xmm6, %xmm0
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm6
+; SSE2-NEXT: paddb %xmm0, %xmm6
+; SSE2-NEXT: paddb %xmm9, %xmm6
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpgtb %xmm5, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm7, %xmm1
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm5
+; SSE2-NEXT: paddb %xmm1, %xmm5
+; SSE2-NEXT: paddb %xmm0, %xmm5
+; SSE2-NEXT: paddb %xmm2, %xmm5
+; SSE2-NEXT: paddb %xmm6, %xmm5
+; SSE2-NEXT: paddb %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: paddb %xmm5, %xmm0
+; SSE2-NEXT: psadbw %xmm8, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v128i8_v128i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm8, %xmm8
+; SSE41-NEXT: pxor %xmm9, %xmm9
+; SSE41-NEXT: pcmpgtb %xmm4, %xmm9
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpgtb %xmm0, %xmm4
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pcmpgtb %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: pcmpgtb %xmm2, %xmm6
+; SSE41-NEXT: paddb %xmm0, %xmm6
+; SSE41-NEXT: paddb %xmm9, %xmm6
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pcmpgtb %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpgtb %xmm7, %xmm1
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pcmpgtb %xmm3, %xmm5
+; SSE41-NEXT: paddb %xmm1, %xmm5
+; SSE41-NEXT: paddb %xmm0, %xmm5
+; SSE41-NEXT: paddb %xmm2, %xmm5
+; SSE41-NEXT: paddb %xmm6, %xmm5
+; SSE41-NEXT: paddb %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE41-NEXT: paddb %xmm5, %xmm0
+; SSE41-NEXT: psadbw %xmm8, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_v128i8_v128i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm4, %xmm8
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm7
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm4, %xmm6
+; AVX1-NEXT: vpaddb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsadbw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v128i8_v128i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpcmpgtb %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpcmpgtb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v128i8_v128i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: vpmovb2m %zmm1, %k1
+; AVX512-NEXT: vpmovm2b %k1, %zmm0
+; AVX512-NEXT: vpmovm2b %k0, %zmm1
+; AVX512-NEXT: vpaddb %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <128 x i8> %a0, zeroinitializer
+ %2 = sext <128 x i1> %1 to <128 x i8>
+ %3 = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %2)
+ ret i8 %3
+}
+
declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
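All of the new tests share the same shape: compare the input against zero, sign-extend the resulting <N x i1> mask, and feed it to llvm.vector.reduce.add. Because sext of a true i1 lane yields -1, the reduction computes minus the count of matching elements. A minimal standalone sketch of the pattern (the wrapper name count_negative_v4i32 and the final negation are illustrative additions, not part of the committed tests):

; Count negative elements of a <4 x i32> via the sum-of-sext-bool idiom.
define i32 @count_negative_v4i32(<4 x i32> %v) {
  %m = icmp slt <4 x i32> %v, zeroinitializer       ; per-lane i1 mask
  %s = sext <4 x i1> %m to <4 x i32>                ; true -> -1, false -> 0
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
  %n = sub i32 0, %r                                ; negate: -sum = popcount of mask
  ret i32 %n
}
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

As the checks above show, current codegen performs the full vector reduction; a mask-based lowering (e.g. movmsk followed by a population count) would presumably be cheaper, which is the kind of improvement baseline tests like these are positioned to track.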