[llvm] ce47e13 - [X86] Add reduce_add(ctpop(x)) 'count all bits in a vector' tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 24 03:34:41 PDT 2023
Author: Simon Pilgrim
Date: 2023-07-24T11:34:08+01:00
New Revision: ce47e13bb9392fac697a79bab54baf439483db45
URL: https://github.com/llvm/llvm-project/commit/ce47e13bb9392fac697a79bab54baf439483db45
DIFF: https://github.com/llvm/llvm-project/commit/ce47e13bb9392fac697a79bab54baf439483db45.diff
LOG: [X86] Add reduce_add(ctpop(x)) 'count all bits in a vector' tests
Also add some basic buildvector variants: build_vector(reduce_add(ctpop(x0)), reduce_add(ctpop(x1)), ...)
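For context while reading the generated checks below, here is a minimal C sketch of what the two tested patterns compute (an illustrative equivalent only, not part of the commit; the function names are made up for illustration, and v4i64 is just one of the element widths exercised):

#include <stdint.h>

/* reduce_add(ctpop(x)): per-element popcount followed by a horizontal add,
   which is equivalent to counting every set bit in the whole vector. */
uint64_t count_all_bits_v4i64(const uint64_t v[4]) {
  uint64_t sum = 0;
  for (int i = 0; i < 4; ++i)
    sum += (uint64_t)__builtin_popcountll(v[i]);
  return sum;
}

/* build_vector(reduce_add(ctpop(x0)), reduce_add(ctpop(x1)), ...):
   one such count per input vector, packed into a result vector. */
void count_all_bits_buildvector(const uint64_t a0[4], const uint64_t a1[4],
                                const uint64_t a2[4], const uint64_t a3[4],
                                uint64_t out[4]) {
  out[0] = count_all_bits_v4i64(a0);
  out[1] = count_all_bits_v4i64(a1);
  out[2] = count_all_bits_v4i64(a2);
  out[3] = count_all_bits_v4i64(a3);
}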
Added:
llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll b/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
new file mode 100644
index 00000000000000..c2b38dbf1483db
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
@@ -0,0 +1,1243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512vpopcntdq | FileCheck %s --check-prefixes=AVX512VPOPCNT
+
+
+;
+; Reductions of per-element ctpop results (count all bits in a vector)
+;
+
+define i64 @reduce_ctpop_v2i64(<2 x i64> %a0) {
+; SSE42-LABEL: reduce_ctpop_v2i64:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: pand %xmm1, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm3, %xmm4
+; SSE42-NEXT: pshufb %xmm2, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm1, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm3
+; SSE42-NEXT: paddb %xmm4, %xmm3
+; SSE42-NEXT: pxor %xmm0, %xmm0
+; SSE42-NEXT: psadbw %xmm3, %xmm0
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE42-NEXT: paddq %xmm0, %xmm1
+; SSE42-NEXT: movq %xmm1, %rax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v2i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v2i64:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovq %xmm0, %rax
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a0)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %p0)
+ ret i64 %r0
+}
+
+define i32 @reduce_ctpop_v4i32(<4 x i32> %a0) {
+; SSE42-LABEL: reduce_ctpop_v4i32:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: pand %xmm1, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm3, %xmm4
+; SSE42-NEXT: pshufb %xmm2, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm1, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm3
+; SSE42-NEXT: paddb %xmm4, %xmm3
+; SSE42-NEXT: pxor %xmm0, %xmm0
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE42-NEXT: psadbw %xmm0, %xmm3
+; SSE42-NEXT: psadbw %xmm0, %xmm1
+; SSE42-NEXT: packuswb %xmm3, %xmm1
+; SSE42-NEXT: packuswb %xmm3, %xmm3
+; SSE42-NEXT: paddd %xmm1, %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
+; SSE42-NEXT: paddd %xmm3, %xmm0
+; SSE42-NEXT: movd %xmm0, %eax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT: vpsadbw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm2, %xmm2, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v4i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm2, %xmm2
+; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovdb %xmm0, %xmm0
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %eax
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v4i32:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntd %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpmovdb %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovd %xmm0, %eax
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a0)
+ %r0 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %p0)
+ ret i32 %r0
+}
+
+define i16 @reduce_ctpop_v8i16(<8 x i16> %a0) {
+; SSE42-LABEL: reduce_ctpop_v8i16:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: pand %xmm1, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm3, %xmm4
+; SSE42-NEXT: pshufb %xmm2, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm1, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm3
+; SSE42-NEXT: paddb %xmm4, %xmm3
+; SSE42-NEXT: movdqa %xmm3, %xmm0
+; SSE42-NEXT: psllw $8, %xmm0
+; SSE42-NEXT: paddb %xmm3, %xmm0
+; SSE42-NEXT: psrlw $8, %xmm0
+; SSE42-NEXT: packuswb %xmm0, %xmm0
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: psadbw %xmm0, %xmm1
+; SSE42-NEXT: movd %xmm1, %eax
+; SSE42-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v8i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX512VL-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512VL-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %eax
+; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v8i16:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VPOPCNT-NEXT: vpopcntd %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovd %xmm0, %eax
+; AVX512VPOPCNT-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a0)
+ %r0 = tail call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %p0)
+ ret i16 %r0
+}
+
+define i8 @reduce_ctpop_v16i8(<16 x i8> %a0) {
+; SSE42-LABEL: reduce_ctpop_v16i8:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: pand %xmm1, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm3, %xmm4
+; SSE42-NEXT: pshufb %xmm2, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm1, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm3
+; SSE42-NEXT: paddb %xmm4, %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE42-NEXT: paddb %xmm3, %xmm0
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: psadbw %xmm0, %xmm1
+; SSE42-NEXT: movd %xmm1, %eax
+; SSE42-NEXT: # kill: def $al killed $al killed $eax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VL-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v16i8:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VPOPCNT-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNT-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VPOPCNT-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovd %xmm0, %eax
+; AVX512VPOPCNT-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a0)
+ %r0 = tail call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %p0)
+ ret i8 %r0
+}
+
+define i64 @reduce_ctpop_v4i64(<4 x i64> %a0) {
+; SSE42-LABEL: reduce_ctpop_v4i64:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm1, %xmm3
+; SSE42-NEXT: pand %xmm2, %xmm3
+; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm4, %xmm5
+; SSE42-NEXT: pshufb %xmm3, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm2, %xmm1
+; SSE42-NEXT: movdqa %xmm4, %xmm3
+; SSE42-NEXT: pshufb %xmm1, %xmm3
+; SSE42-NEXT: paddb %xmm5, %xmm3
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: psadbw %xmm1, %xmm3
+; SSE42-NEXT: movdqa %xmm0, %xmm5
+; SSE42-NEXT: pand %xmm2, %xmm5
+; SSE42-NEXT: movdqa %xmm4, %xmm6
+; SSE42-NEXT: pshufb %xmm5, %xmm6
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm2, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm4
+; SSE42-NEXT: paddb %xmm6, %xmm4
+; SSE42-NEXT: psadbw %xmm1, %xmm4
+; SSE42-NEXT: paddq %xmm3, %xmm4
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE42-NEXT: paddq %xmm4, %xmm0
+; SSE42-NEXT: movq %xmm0, %rax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v4i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovqb %ymm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v4i64:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovq %xmm0, %rax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a0)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p0)
+ ret i64 %r0
+}
+
+define i32 @reduce_ctpop_v8i32(<8 x i32> %a0) {
+; SSE42-LABEL: reduce_ctpop_v8i32:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm1, %xmm4
+; SSE42-NEXT: pand %xmm3, %xmm4
+; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm2, %xmm5
+; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm3, %xmm1
+; SSE42-NEXT: movdqa %xmm2, %xmm4
+; SSE42-NEXT: pshufb %xmm1, %xmm4
+; SSE42-NEXT: paddb %xmm5, %xmm4
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE42-NEXT: psadbw %xmm1, %xmm4
+; SSE42-NEXT: psadbw %xmm1, %xmm5
+; SSE42-NEXT: packuswb %xmm4, %xmm5
+; SSE42-NEXT: movdqa %xmm0, %xmm4
+; SSE42-NEXT: pand %xmm3, %xmm4
+; SSE42-NEXT: movdqa %xmm2, %xmm6
+; SSE42-NEXT: pshufb %xmm4, %xmm6
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm3, %xmm0
+; SSE42-NEXT: pshufb %xmm0, %xmm2
+; SSE42-NEXT: paddb %xmm6, %xmm2
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE42-NEXT: psadbw %xmm1, %xmm2
+; SSE42-NEXT: psadbw %xmm1, %xmm0
+; SSE42-NEXT: packuswb %xmm2, %xmm0
+; SSE42-NEXT: paddd %xmm5, %xmm0
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE42-NEXT: paddd %xmm0, %xmm1
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE42-NEXT: paddd %xmm1, %xmm0
+; SSE42-NEXT: movd %xmm0, %eax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX2-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v8i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX512VL-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v8i32:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntd %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovd %xmm0, %eax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %a0)
+ %r0 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %p0)
+ ret i32 %r0
+}
+
+;
+; Vectors of reductions of per-element ctpop results (build a vector from the 'count all bits' result of each input vector)
+;
+
+define <4 x i64> @reduce_ctpop_v4i64_buildvector_v4i64(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i64> %a3) nounwind {
+; SSE42-LABEL: reduce_ctpop_v4i64_buildvector_v4i64:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa %xmm0, %xmm8
+; SSE42-NEXT: movdqa {{.*#+}} xmm10 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm1, %xmm0
+; SSE42-NEXT: pand %xmm10, %xmm0
+; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm9, %xmm11
+; SSE42-NEXT: pshufb %xmm0, %xmm11
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm9, %xmm12
+; SSE42-NEXT: pshufb %xmm1, %xmm12
+; SSE42-NEXT: paddb %xmm11, %xmm12
+; SSE42-NEXT: pxor %xmm11, %xmm11
+; SSE42-NEXT: psadbw %xmm11, %xmm12
+; SSE42-NEXT: movdqa %xmm8, %xmm0
+; SSE42-NEXT: pand %xmm10, %xmm0
+; SSE42-NEXT: movdqa %xmm9, %xmm1
+; SSE42-NEXT: pshufb %xmm0, %xmm1
+; SSE42-NEXT: psrlw $4, %xmm8
+; SSE42-NEXT: pand %xmm10, %xmm8
+; SSE42-NEXT: movdqa %xmm9, %xmm0
+; SSE42-NEXT: pshufb %xmm8, %xmm0
+; SSE42-NEXT: paddb %xmm1, %xmm0
+; SSE42-NEXT: psadbw %xmm11, %xmm0
+; SSE42-NEXT: paddq %xmm12, %xmm0
+; SSE42-NEXT: movdqa %xmm3, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm9, %xmm8
+; SSE42-NEXT: pshufb %xmm1, %xmm8
+; SSE42-NEXT: psrlw $4, %xmm3
+; SSE42-NEXT: pand %xmm10, %xmm3
+; SSE42-NEXT: movdqa %xmm9, %xmm1
+; SSE42-NEXT: pshufb %xmm3, %xmm1
+; SSE42-NEXT: paddb %xmm8, %xmm1
+; SSE42-NEXT: psadbw %xmm11, %xmm1
+; SSE42-NEXT: movdqa %xmm2, %xmm3
+; SSE42-NEXT: pand %xmm10, %xmm3
+; SSE42-NEXT: movdqa %xmm9, %xmm8
+; SSE42-NEXT: pshufb %xmm3, %xmm8
+; SSE42-NEXT: psrlw $4, %xmm2
+; SSE42-NEXT: pand %xmm10, %xmm2
+; SSE42-NEXT: movdqa %xmm9, %xmm3
+; SSE42-NEXT: pshufb %xmm2, %xmm3
+; SSE42-NEXT: paddb %xmm8, %xmm3
+; SSE42-NEXT: psadbw %xmm11, %xmm3
+; SSE42-NEXT: paddq %xmm1, %xmm3
+; SSE42-NEXT: movdqa %xmm5, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm9, %xmm2
+; SSE42-NEXT: pshufb %xmm1, %xmm2
+; SSE42-NEXT: psrlw $4, %xmm5
+; SSE42-NEXT: pand %xmm10, %xmm5
+; SSE42-NEXT: movdqa %xmm9, %xmm8
+; SSE42-NEXT: pshufb %xmm5, %xmm8
+; SSE42-NEXT: paddb %xmm2, %xmm8
+; SSE42-NEXT: psadbw %xmm11, %xmm8
+; SSE42-NEXT: movdqa %xmm4, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm9, %xmm2
+; SSE42-NEXT: pshufb %xmm1, %xmm2
+; SSE42-NEXT: psrlw $4, %xmm4
+; SSE42-NEXT: pand %xmm10, %xmm4
+; SSE42-NEXT: movdqa %xmm9, %xmm1
+; SSE42-NEXT: pshufb %xmm4, %xmm1
+; SSE42-NEXT: paddb %xmm2, %xmm1
+; SSE42-NEXT: psadbw %xmm11, %xmm1
+; SSE42-NEXT: paddq %xmm8, %xmm1
+; SSE42-NEXT: movdqa %xmm7, %xmm2
+; SSE42-NEXT: pand %xmm10, %xmm2
+; SSE42-NEXT: movdqa %xmm9, %xmm4
+; SSE42-NEXT: pshufb %xmm2, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm7
+; SSE42-NEXT: pand %xmm10, %xmm7
+; SSE42-NEXT: movdqa %xmm9, %xmm2
+; SSE42-NEXT: pshufb %xmm7, %xmm2
+; SSE42-NEXT: paddb %xmm4, %xmm2
+; SSE42-NEXT: psadbw %xmm11, %xmm2
+; SSE42-NEXT: movdqa %xmm6, %xmm4
+; SSE42-NEXT: pand %xmm10, %xmm4
+; SSE42-NEXT: movdqa %xmm9, %xmm5
+; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm6
+; SSE42-NEXT: pand %xmm10, %xmm6
+; SSE42-NEXT: pshufb %xmm6, %xmm9
+; SSE42-NEXT: paddb %xmm5, %xmm9
+; SSE42-NEXT: psadbw %xmm11, %xmm9
+; SSE42-NEXT: paddq %xmm2, %xmm9
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSE42-NEXT: paddq %xmm2, %xmm0
+; SSE42-NEXT: movdqa %xmm1, %xmm2
+; SSE42-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm9[1]
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm9[0]
+; SSE42-NEXT: paddq %xmm2, %xmm1
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v4i64_buildvector_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm5
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm5, %ymm6, %ymm5
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm6, %ymm0
+; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX2-NEXT: vpsadbw %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm7
+; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm6, %ymm1
+; AVX2-NEXT: vpaddb %ymm7, %ymm1, %ymm1
+; AVX2-NEXT: vpsadbw %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm7
+; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX2-NEXT: vpsrlw $4, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX2-NEXT: vpaddb %ymm7, %ymm2, %ymm2
+; AVX2-NEXT: vpsadbw %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm7
+; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX2-NEXT: vpsrlw $4, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpshufb %ymm3, %ymm6, %ymm3
+; AVX2-NEXT: vpaddb %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpsadbw %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm2[1],xmm3[1]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpaddq %xmm5, %xmm0, %xmm0
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm0[1],xmm1[1]
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v4i64_buildvector_v4i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm5
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpshufb %ymm5, %ymm6, %ymm5
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm6, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-NEXT: vpsadbw %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm7
+; AVX512VL-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm6, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsadbw %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm7
+; AVX512VL-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsadbw %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm7
+; AVX512VL-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpshufb %ymm3, %ymm6, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsadbw %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX512VL-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm2[1],xmm3[1]
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX512VL-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX512VL-NEXT: vpaddq %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm0[1],xmm1[1]
+; AVX512VL-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v4i64_buildvector_v4i64:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm1, %ymm1
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm2, %ymm2
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm3, %ymm3
+; AVX512VPOPCNT-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX512VPOPCNT-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VPOPCNT-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX512VPOPCNT-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; AVX512VPOPCNT-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm2[1],xmm3[1]
+; AVX512VPOPCNT-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX512VPOPCNT-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX512VPOPCNT-NEXT: vpaddq %xmm5, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm0[1],xmm1[1]
+; AVX512VPOPCNT-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512VPOPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512VPOPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VPOPCNT-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a0)
+ %p1 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a1)
+ %p2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a2)
+ %p3 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a3)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p0)
+ %r1 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p1)
+ %r2 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p2)
+ %r3 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p3)
+ %v0 = insertelement <4 x i64> undef, i64 %r0, i64 0
+ %v1 = insertelement <4 x i64> %v0, i64 %r1, i64 1
+ %v2 = insertelement <4 x i64> %v1, i64 %r2, i64 2
+ %v3 = insertelement <4 x i64> %v2, i64 %r3, i64 3
+ ret <4 x i64> %v3
+}
+
+define <8 x i32> @reduce_ctpop_v4i64_buildvector_v8i32(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i64> %a3, <4 x i64> %a4, <4 x i64> %a5, <4 x i64> %a6, <4 x i64> %a7) nounwind {
+; SSE42-LABEL: reduce_ctpop_v4i64_buildvector_v8i32:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE42-NEXT: movdqa {{.*#+}} xmm10 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm1, %xmm12
+; SSE42-NEXT: pand %xmm10, %xmm12
+; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm12, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm8, %xmm12
+; SSE42-NEXT: pshufb %xmm1, %xmm12
+; SSE42-NEXT: paddb %xmm13, %xmm12
+; SSE42-NEXT: movdqa %xmm0, %xmm1
+; SSE42-NEXT: pand %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm1, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm10, %xmm0
+; SSE42-NEXT: movdqa %xmm8, %xmm1
+; SSE42-NEXT: pshufb %xmm0, %xmm1
+; SSE42-NEXT: pxor %xmm0, %xmm0
+; SSE42-NEXT: psadbw %xmm0, %xmm12
+; SSE42-NEXT: paddb %xmm13, %xmm1
+; SSE42-NEXT: psadbw %xmm0, %xmm1
+; SSE42-NEXT: paddq %xmm12, %xmm1
+; SSE42-NEXT: movdqa %xmm3, %xmm12
+; SSE42-NEXT: pand %xmm10, %xmm12
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm12, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm3
+; SSE42-NEXT: pand %xmm10, %xmm3
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm3, %xmm14
+; SSE42-NEXT: paddb %xmm13, %xmm14
+; SSE42-NEXT: movdqa %xmm2, %xmm3
+; SSE42-NEXT: pand %xmm10, %xmm3
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm3, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm2
+; SSE42-NEXT: pand %xmm10, %xmm2
+; SSE42-NEXT: movdqa %xmm8, %xmm3
+; SSE42-NEXT: pshufb %xmm2, %xmm3
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm13, %xmm3
+; SSE42-NEXT: psadbw %xmm0, %xmm3
+; SSE42-NEXT: paddq %xmm14, %xmm3
+; SSE42-NEXT: movdqa %xmm5, %xmm2
+; SSE42-NEXT: pand %xmm10, %xmm2
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm2, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm5
+; SSE42-NEXT: pand %xmm10, %xmm5
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm5, %xmm14
+; SSE42-NEXT: paddb %xmm13, %xmm14
+; SSE42-NEXT: movdqa %xmm4, %xmm2
+; SSE42-NEXT: pand %xmm10, %xmm2
+; SSE42-NEXT: movdqa %xmm8, %xmm5
+; SSE42-NEXT: pshufb %xmm2, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm4
+; SSE42-NEXT: pand %xmm10, %xmm4
+; SSE42-NEXT: movdqa %xmm8, %xmm2
+; SSE42-NEXT: pshufb %xmm4, %xmm2
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm5, %xmm2
+; SSE42-NEXT: psadbw %xmm0, %xmm2
+; SSE42-NEXT: paddq %xmm14, %xmm2
+; SSE42-NEXT: movdqa %xmm7, %xmm4
+; SSE42-NEXT: pand %xmm10, %xmm4
+; SSE42-NEXT: movdqa %xmm8, %xmm5
+; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm7
+; SSE42-NEXT: pand %xmm10, %xmm7
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm7, %xmm14
+; SSE42-NEXT: paddb %xmm5, %xmm14
+; SSE42-NEXT: movdqa %xmm6, %xmm4
+; SSE42-NEXT: pand %xmm10, %xmm4
+; SSE42-NEXT: movdqa %xmm8, %xmm5
+; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: psrlw $4, %xmm6
+; SSE42-NEXT: pand %xmm10, %xmm6
+; SSE42-NEXT: movdqa %xmm8, %xmm4
+; SSE42-NEXT: pshufb %xmm6, %xmm4
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm5, %xmm4
+; SSE42-NEXT: psadbw %xmm0, %xmm4
+; SSE42-NEXT: paddq %xmm14, %xmm4
+; SSE42-NEXT: movdqa %xmm6, %xmm5
+; SSE42-NEXT: pand %xmm10, %xmm5
+; SSE42-NEXT: movdqa %xmm8, %xmm7
+; SSE42-NEXT: pshufb %xmm5, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm6
+; SSE42-NEXT: pand %xmm10, %xmm6
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm6, %xmm14
+; SSE42-NEXT: paddb %xmm7, %xmm14
+; SSE42-NEXT: movdqa %xmm13, %xmm5
+; SSE42-NEXT: pand %xmm10, %xmm5
+; SSE42-NEXT: movdqa %xmm8, %xmm6
+; SSE42-NEXT: pshufb %xmm5, %xmm6
+; SSE42-NEXT: psrlw $4, %xmm13
+; SSE42-NEXT: pand %xmm10, %xmm13
+; SSE42-NEXT: movdqa %xmm8, %xmm5
+; SSE42-NEXT: pshufb %xmm13, %xmm5
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm6, %xmm5
+; SSE42-NEXT: psadbw %xmm0, %xmm5
+; SSE42-NEXT: paddq %xmm14, %xmm5
+; SSE42-NEXT: movdqa %xmm7, %xmm6
+; SSE42-NEXT: pand %xmm10, %xmm6
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm6, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm7
+; SSE42-NEXT: pand %xmm10, %xmm7
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm7, %xmm14
+; SSE42-NEXT: paddb %xmm13, %xmm14
+; SSE42-NEXT: movdqa %xmm12, %xmm6
+; SSE42-NEXT: pand %xmm10, %xmm6
+; SSE42-NEXT: movdqa %xmm8, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm12
+; SSE42-NEXT: pand %xmm10, %xmm12
+; SSE42-NEXT: movdqa %xmm8, %xmm6
+; SSE42-NEXT: pshufb %xmm12, %xmm6
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm7, %xmm6
+; SSE42-NEXT: psadbw %xmm0, %xmm6
+; SSE42-NEXT: paddq %xmm14, %xmm6
+; SSE42-NEXT: movdqa %xmm12, %xmm7
+; SSE42-NEXT: pand %xmm10, %xmm7
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm7, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm12
+; SSE42-NEXT: pand %xmm10, %xmm12
+; SSE42-NEXT: movdqa %xmm8, %xmm14
+; SSE42-NEXT: pshufb %xmm12, %xmm14
+; SSE42-NEXT: paddb %xmm13, %xmm14
+; SSE42-NEXT: movdqa %xmm11, %xmm7
+; SSE42-NEXT: pand %xmm10, %xmm7
+; SSE42-NEXT: movdqa %xmm8, %xmm12
+; SSE42-NEXT: pshufb %xmm7, %xmm12
+; SSE42-NEXT: psrlw $4, %xmm11
+; SSE42-NEXT: pand %xmm10, %xmm11
+; SSE42-NEXT: movdqa %xmm8, %xmm7
+; SSE42-NEXT: pshufb %xmm11, %xmm7
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE42-NEXT: psadbw %xmm0, %xmm14
+; SSE42-NEXT: paddb %xmm12, %xmm7
+; SSE42-NEXT: psadbw %xmm0, %xmm7
+; SSE42-NEXT: paddq %xmm14, %xmm7
+; SSE42-NEXT: movdqa %xmm11, %xmm12
+; SSE42-NEXT: pand %xmm10, %xmm12
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm12, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm11
+; SSE42-NEXT: pand %xmm10, %xmm11
+; SSE42-NEXT: movdqa %xmm8, %xmm12
+; SSE42-NEXT: pshufb %xmm11, %xmm12
+; SSE42-NEXT: paddb %xmm13, %xmm12
+; SSE42-NEXT: movdqa %xmm9, %xmm11
+; SSE42-NEXT: pand %xmm10, %xmm11
+; SSE42-NEXT: movdqa %xmm8, %xmm13
+; SSE42-NEXT: pshufb %xmm11, %xmm13
+; SSE42-NEXT: psrlw $4, %xmm9
+; SSE42-NEXT: pand %xmm10, %xmm9
+; SSE42-NEXT: pshufb %xmm9, %xmm8
+; SSE42-NEXT: paddb %xmm13, %xmm8
+; SSE42-NEXT: psadbw %xmm0, %xmm12
+; SSE42-NEXT: psadbw %xmm0, %xmm8
+; SSE42-NEXT: paddq %xmm12, %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE42-NEXT: paddq %xmm1, %xmm0
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE42-NEXT: paddq %xmm3, %xmm1
+; SSE42-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE42-NEXT: paddq %xmm2, %xmm1
+; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; SSE42-NEXT: paddq %xmm4, %xmm2
+; SSE42-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; SSE42-NEXT: paddq %xmm5, %xmm1
+; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
+; SSE42-NEXT: paddq %xmm6, %xmm2
+; SSE42-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,2,3]
+; SSE42-NEXT: paddq %xmm7, %xmm2
+; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
+; SSE42-NEXT: paddq %xmm8, %xmm3
+; SSE42-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v4i64_buildvector_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm0, %ymm8, %ymm10
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm10, %ymm9, %ymm10
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm9, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm10, %ymm0
+; AVX2-NEXT: vpxor %xmm10, %xmm10, %xmm10
+; AVX2-NEXT: vpsadbw %ymm0, %ymm10, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm8, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm11, %ymm1
+; AVX2-NEXT: vpsadbw %ymm1, %ymm10, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm8, %ymm2
+; AVX2-NEXT: vpshufb %ymm2, %ymm9, %ymm2
+; AVX2-NEXT: vpaddb %ymm2, %ymm11, %ymm2
+; AVX2-NEXT: vpsadbw %ymm2, %ymm10, %ymm2
+; AVX2-NEXT: vpand %ymm3, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm3, %ymm8, %ymm3
+; AVX2-NEXT: vpshufb %ymm3, %ymm9, %ymm3
+; AVX2-NEXT: vpaddb %ymm3, %ymm11, %ymm3
+; AVX2-NEXT: vpsadbw %ymm3, %ymm10, %ymm3
+; AVX2-NEXT: vpand %ymm4, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm4, %ymm4
+; AVX2-NEXT: vpand %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm4, %ymm9, %ymm4
+; AVX2-NEXT: vpaddb %ymm4, %ymm11, %ymm4
+; AVX2-NEXT: vpsadbw %ymm4, %ymm10, %ymm4
+; AVX2-NEXT: vpand %ymm5, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm5, %ymm5
+; AVX2-NEXT: vpand %ymm5, %ymm8, %ymm5
+; AVX2-NEXT: vpshufb %ymm5, %ymm9, %ymm5
+; AVX2-NEXT: vpaddb %ymm5, %ymm11, %ymm5
+; AVX2-NEXT: vpsadbw %ymm5, %ymm10, %ymm5
+; AVX2-NEXT: vpand %ymm6, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm6, %ymm6
+; AVX2-NEXT: vpand %ymm6, %ymm8, %ymm6
+; AVX2-NEXT: vpshufb %ymm6, %ymm9, %ymm6
+; AVX2-NEXT: vpaddb %ymm6, %ymm11, %ymm6
+; AVX2-NEXT: vpsadbw %ymm6, %ymm10, %ymm6
+; AVX2-NEXT: vpand %ymm7, %ymm8, %ymm11
+; AVX2-NEXT: vpshufb %ymm11, %ymm9, %ymm11
+; AVX2-NEXT: vpsrlw $4, %ymm7, %ymm7
+; AVX2-NEXT: vpand %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpshufb %ymm7, %ymm9, %ymm7
+; AVX2-NEXT: vpaddb %ymm7, %ymm11, %ymm7
+; AVX2-NEXT: vpsadbw %ymm7, %ymm10, %ymm7
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm8
+; AVX2-NEXT: vpaddq %xmm0, %xmm8, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm9
+; AVX2-NEXT: vpaddq %xmm1, %xmm9, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm10
+; AVX2-NEXT: vpaddq %xmm2, %xmm10, %xmm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm10 = xmm2[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm11
+; AVX2-NEXT: vpaddq %xmm3, %xmm11, %xmm3
+; AVX2-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm12
+; AVX2-NEXT: vpaddq %xmm4, %xmm12, %xmm4
+; AVX2-NEXT: vpshufd {{.*#+}} xmm12 = xmm4[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm13
+; AVX2-NEXT: vpaddq %xmm5, %xmm13, %xmm5
+; AVX2-NEXT: vpshufd {{.*#+}} xmm13 = xmm5[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm14
+; AVX2-NEXT: vpaddq %xmm6, %xmm14, %xmm6
+; AVX2-NEXT: vpshufd {{.*#+}} xmm14 = xmm6[2,3,2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm15
+; AVX2-NEXT: vpaddq %xmm7, %xmm15, %xmm7
+; AVX2-NEXT: vpshufd {{.*#+}} xmm15 = xmm7[2,3,2,3]
+; AVX2-NEXT: vpaddq %xmm0, %xmm8, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm9, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpaddq %xmm2, %xmm10, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: vpaddq %xmm3, %xmm11, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: vpaddq %xmm4, %xmm12, %xmm1
+; AVX2-NEXT: vpaddq %xmm5, %xmm13, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %esi
+; AVX2-NEXT: vpaddq %xmm6, %xmm14, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %edi
+; AVX2-NEXT: vpaddq %xmm7, %xmm15, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %r8d
+; AVX2-NEXT: vpinsrd $1, %esi, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrd $2, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrd $3, %r8d, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v4i64_buildvector_v8i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} ymm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm0, %ymm8, %ymm9
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpshufb %ymm9, %ymm10, %ymm9
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm8, %ymm0
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm10, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm9, %ymm0
+; AVX512VL-NEXT: vpxor %xmm9, %xmm9, %xmm9
+; AVX512VL-NEXT: vpsadbw %ymm0, %ymm9, %ymm0
+; AVX512VL-NEXT: vpand %ymm1, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm1, %ymm8, %ymm1
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm10, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm11, %ymm1
+; AVX512VL-NEXT: vpsadbw %ymm1, %ymm9, %ymm1
+; AVX512VL-NEXT: vpand %ymm2, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm8, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm10, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm11, %ymm2
+; AVX512VL-NEXT: vpsadbw %ymm2, %ymm9, %ymm2
+; AVX512VL-NEXT: vpand %ymm3, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpand %ymm3, %ymm8, %ymm3
+; AVX512VL-NEXT: vpshufb %ymm3, %ymm10, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm11, %ymm3
+; AVX512VL-NEXT: vpsadbw %ymm3, %ymm9, %ymm3
+; AVX512VL-NEXT: vpand %ymm4, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm4, %ymm8, %ymm4
+; AVX512VL-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm4, %ymm11, %ymm4
+; AVX512VL-NEXT: vpsadbw %ymm4, %ymm9, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm5, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm8, %ymm5
+; AVX512VL-NEXT: vpshufb %ymm5, %ymm10, %ymm5
+; AVX512VL-NEXT: vpaddb %ymm5, %ymm11, %ymm5
+; AVX512VL-NEXT: vpsadbw %ymm5, %ymm9, %ymm5
+; AVX512VL-NEXT: vpand %ymm6, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm6, %ymm6
+; AVX512VL-NEXT: vpand %ymm6, %ymm8, %ymm6
+; AVX512VL-NEXT: vpshufb %ymm6, %ymm10, %ymm6
+; AVX512VL-NEXT: vpaddb %ymm6, %ymm11, %ymm6
+; AVX512VL-NEXT: vpsadbw %ymm6, %ymm9, %ymm6
+; AVX512VL-NEXT: vpand %ymm7, %ymm8, %ymm11
+; AVX512VL-NEXT: vpshufb %ymm11, %ymm10, %ymm11
+; AVX512VL-NEXT: vpsrlw $4, %ymm7, %ymm7
+; AVX512VL-NEXT: vpand %ymm7, %ymm8, %ymm7
+; AVX512VL-NEXT: vpshufb %ymm7, %ymm10, %ymm7
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm11, %ymm7
+; AVX512VL-NEXT: vpsadbw %ymm7, %ymm9, %ymm7
+; AVX512VL-NEXT: vpmovqb %ymm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX512VL-NEXT: vpsadbw %xmm0, %xmm8, %xmm0
+; AVX512VL-NEXT: vpmovqb %ymm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm8, %xmm1
+; AVX512VL-NEXT: vpmovqb %ymm2, %xmm2
+; AVX512VL-NEXT: vpsadbw %xmm2, %xmm8, %xmm2
+; AVX512VL-NEXT: vpmovqb %ymm3, %xmm3
+; AVX512VL-NEXT: vpsadbw %xmm3, %xmm8, %xmm3
+; AVX512VL-NEXT: vpmovqb %ymm4, %xmm4
+; AVX512VL-NEXT: vpsadbw %xmm4, %xmm8, %xmm4
+; AVX512VL-NEXT: vpmovqb %ymm5, %xmm5
+; AVX512VL-NEXT: vpsadbw %xmm5, %xmm8, %xmm5
+; AVX512VL-NEXT: vpmovqb %ymm6, %xmm6
+; AVX512VL-NEXT: vpsadbw %xmm6, %xmm8, %xmm6
+; AVX512VL-NEXT: vpmovqb %ymm7, %xmm7
+; AVX512VL-NEXT: vpsadbw %xmm7, %xmm8, %xmm7
+; AVX512VL-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,4,8,12,0,4,8,12]
+; AVX512VL-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpermi2d %ymm6, %ymm4, %ymm5
+; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512VL-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm2 = [0,4,0,4]
+; AVX512VL-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v4i64_buildvector_v8i32:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm0, %ymm0
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm1, %ymm1
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm2, %ymm2
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm3, %ymm3
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm4, %ymm4
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm5, %ymm5
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm6, %ymm6
+; AVX512VPOPCNT-NEXT: vpopcntq %ymm7, %ymm7
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm0, %xmm8, %xmm0
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm8, %xmm1
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm2, %xmm2
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm2, %xmm8, %xmm2
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm3, %xmm3
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm3, %xmm8, %xmm3
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm4, %xmm4
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm4, %xmm8, %xmm4
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm5, %xmm5
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm5, %xmm8, %xmm5
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm6, %xmm6
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm6, %xmm8, %xmm6
+; AVX512VPOPCNT-NEXT: vpmovqb %ymm7, %xmm7
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm7, %xmm8, %xmm7
+; AVX512VPOPCNT-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
+; AVX512VPOPCNT-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
+; AVX512VPOPCNT-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,4,8,12,0,4,8,12]
+; AVX512VPOPCNT-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512VPOPCNT-NEXT: vpermi2d %ymm6, %ymm4, %ymm5
+; AVX512VPOPCNT-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512VPOPCNT-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512VPOPCNT-NEXT: vpbroadcastq {{.*#+}} xmm2 = [0,4,0,4]
+; AVX512VPOPCNT-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX512VPOPCNT-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VPOPCNT-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a0)
+ %p1 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a1)
+ %p2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a2)
+ %p3 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a3)
+ %p4 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a4)
+ %p5 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a5)
+ %p6 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a6)
+ %p7 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a7)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p0)
+ %r1 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p1)
+ %r2 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p2)
+ %r3 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p3)
+ %r4 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p4)
+ %r5 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p5)
+ %r6 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p6)
+ %r7 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %p7)
+ %t0 = trunc i64 %r0 to i32
+ %t1 = trunc i64 %r1 to i32
+ %t2 = trunc i64 %r2 to i32
+ %t3 = trunc i64 %r3 to i32
+ %t4 = trunc i64 %r4 to i32
+ %t5 = trunc i64 %r5 to i32
+ %t6 = trunc i64 %r6 to i32
+ %t7 = trunc i64 %r7 to i32
+ %v0 = insertelement <8 x i32> undef, i32 %t0, i64 0
+ %v1 = insertelement <8 x i32> %v0, i32 %t1, i64 1
+ %v2 = insertelement <8 x i32> %v1, i32 %t2, i64 2
+ %v3 = insertelement <8 x i32> %v2, i32 %t3, i64 3
+ %v4 = insertelement <8 x i32> %v3, i32 %t4, i64 4
+ %v5 = insertelement <8 x i32> %v4, i32 %t5, i64 5
+ %v6 = insertelement <8 x i32> %v5, i32 %t6, i64 6
+ %v7 = insertelement <8 x i32> %v6, i32 %t7, i64 7
+ ret <8 x i32> %v7
+}
+
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
+
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)