[llvm] 89e968f - [X86] Pre-checkin test case for combining const operand to VNNI

Luo, Yuanke via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 15 22:27:47 PST 2022


Author: Luo, Yuanke
Date: 2022-01-16T14:06:49+08:00
New Revision: 89e968fe8e12f4b83ed911d06650a5b03c0509da

URL: https://github.com/llvm/llvm-project/commit/89e968fe8e12f4b83ed911d06650a5b03c0509da
DIFF: https://github.com/llvm/llvm-project/commit/89e968fe8e12f4b83ed911d06650a5b03c0509da.diff

LOG: [X86] Pre-checkin test case for combining a constant operand into a VNNI
instruction.

Added: 
    llvm/test/CodeGen/X86/dpbusd_const.ll

Modified: 
    

Removed: 
    


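The tests below all follow the same shape: an i8 vector is zero- or sign-extended,
multiplied element-wise by a constant vector, reduced with llvm.vector.reduce.add,
and a scalar is added. The combine this prepares for (not part of this commit)
would rewrite suitable cases onto the VNNI dot-product instruction vpdpbusd.
Purely as an illustration, a hand-written sketch of what a combined form for
mul_4xi8_zc could look like follows; the function name and the exact packing are
hypothetical, and it assumes the existing llvm.x86.avx512.vpdpbusd.128 intrinsic
(AVX512VNNI/VL) rather than whatever lowering the eventual combine chooses.

; Hypothetical sketch only -- not part of this commit.
define i32 @dpbusd_4xi8_zc_sketch(<4 x i8> %a, i32 %c) {
entry:
  ; Put the four unsigned input bytes into the low 32-bit lane of a 16-byte
  ; vector; the remaining bytes are zero and contribute nothing.
  %widened = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
  %a.bc = bitcast <16 x i8> %widened to <4 x i32>
  ; The constant operand <0, 1, 2, 127> packed as signed bytes into lane 0:
  ; 0 | 1<<8 | 2<<16 | 127<<24 = 2130837760.
  %dp = call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> zeroinitializer, <4 x i32> %a.bc, <4 x i32> <i32 2130837760, i32 0, i32 0, i32 0>)
  ; Lane 0 now holds the four-element dot product; add the scalar operand.
  %sum = extractelement <4 x i32> %dp, i32 0
  %op.extra = add i32 %sum, %c
  ret i32 %op.extra
}

declare <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32>, <4 x i32>, <4 x i32>)
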
################################################################################
diff  --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll
new file mode 100644
index 0000000000000..d0cefc5ef3b72
--- /dev/null
+++ b/llvm/test/CodeGen/X86/dpbusd_const.ll
@@ -0,0 +1,285 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI
+
+define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) {
+; ALL-LABEL: mul_4xi8_zc_exceed:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vmovd %xmm0, %eax
+; ALL-NEXT:    addl %edi, %eax
+; ALL-NEXT:    retq
+entry:
+  %0 = zext <4 x i8> %a to <4 x i32>
+  %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128>
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) {
+; ALL-LABEL: mul_4xi8_zc:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vmovd %xmm0, %eax
+; ALL-NEXT:    addl %edi, %eax
+; ALL-NEXT:    retq
+entry:
+  %0 = zext <4 x i8> %a to <4 x i32>
+  %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 127>
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) {
+; AVXVNNI-LABEL: mul_4xi4_cz:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
+; AVXVNNI-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vmovd %xmm0, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    retq
+;
+; AVX512VNNI-LABEL: mul_4xi4_cz:
+; AVX512VNNI:       # %bb.0: # %entry
+; AVX512VNNI-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
+; AVX512VNNI-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    retq
+;
+; AVX512VLVNNI-LABEL: mul_4xi4_cz:
+; AVX512VLVNNI:       # %bb.0: # %entry
+; AVX512VLVNNI-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VLVNNI-NEXT:    addl %edi, %eax
+; AVX512VLVNNI-NEXT:    retq
+entry:
+  %0 = zext <4 x i4> %a to <4 x i32>
+  %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 127>, %0
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) {
+; ALL-LABEL: mul_4xi8_cs:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vpmovsxbd %xmm0, %xmm0
+; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vmovd %xmm0, %eax
+; ALL-NEXT:    addl %edi, %eax
+; ALL-NEXT:    retq
+entry:
+  %0 = sext <4 x i8> %a to <4 x i32>
+  %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 255>, %0
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) {
+; ALL-LABEL: mul_4xi8_cs_exceed:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vpmovsxbd %xmm0, %xmm0
+; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    vmovd %xmm0, %eax
+; ALL-NEXT:    addl %edi, %eax
+; ALL-NEXT:    retq
+entry:
+  %0 = sext <4 x i8> %a to <4 x i32>
+  %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 256>, %0
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_16xi8_zc(<16 x i8> %a, i32 %c) {
+; AVXVNNI-LABEL: mul_16xi8_zc:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVXVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vmovd %xmm0, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    vzeroupper
+; AVXVNNI-NEXT:    retq
+;
+; AVX512-LABEL: mul_16xi8_zc:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    addl %edi, %eax
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+entry:
+  %0 = zext <16 x i8> %a to <16 x i32>
+  %1 = mul nsw <16 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_32xi8_zc(<32 x i8> %a, i32 %c) {
+; AVXVNNI-LABEL: mul_32xi8_zc:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVXVNNI-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
+; AVXVNNI-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm2, %ymm0, %ymm1
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVXVNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vmovd %xmm0, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    vzeroupper
+; AVXVNNI-NEXT:    retq
+;
+; AVX512-LABEL: mul_32xi8_zc:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    addl %edi, %eax
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+entry:
+  %0 = zext <32 x i8> %a to <32 x i32>
+  %1 = mul nsw <32 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
+  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
+; AVXVNNI-LABEL: mul_64xi8_zc:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVXVNNI-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
+; AVXVNNI-NEXT:    vpmaddwd %ymm3, %ymm2, %ymm2
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVXVNNI-NEXT:    vpmaddwd %ymm3, %ymm0, %ymm0
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm3, %ymm1, %ymm0
+; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm3, %ymm4, %ymm2
+; AVXVNNI-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vmovd %xmm0, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    vzeroupper
+; AVXVNNI-NEXT:    retq
+;
+; AVX512-LABEL: mul_64xi8_zc:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm3, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm4, %ymm4
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512-NEXT:    vpaddd %ymm4, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpaddd %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    addl %edi, %eax
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+entry:
+  %0 = zext <64 x i8> %a to <64 x i32>
+  %1 = mul nsw <64 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
+  %2 = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %1)
+  %op.extra = add nsw i32 %2, %c
+  ret i32 %op.extra
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)
+declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>)
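
A note on the *_exceed variants above: per 32-bit lane i, vpdpbusd computes

  dst[i] = src[i] + sum over j=0..3 of ZeroExt(a[4*i+j]) * SignExt(b[4*i+j])

so when the vector input is zero-extended it must sit on the unsigned-byte side
and the constant on the signed-byte side (each constant lane must fit in
[-128, 127]; 128 in mul_4xi8_zc_exceed does not), and when the input is
sign-extended the constant must fit in an unsigned byte ([0, 255]; 256 in
mul_4xi8_cs_exceed does not). The out-of-range cases are expected to keep the
vpmaddwd-based lowering shown above rather than be combined.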

More information about the llvm-commits mailing list