[llvm] r287332 - [X86][AVX2] Add v8i32->v8i64 mul test (PR30845)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 18 03:00:37 PST 2016


Author: rksimon
Date: Fri Nov 18 05:00:36 2016
New Revision: 287332

URL: http://llvm.org/viewvc/llvm-project?rev=287332&view=rev
Log:
[X86][AVX2] Add v8i32->v8i64 mul test (PR30845)
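
The new test covers the case where both <8 x i64> multiplicands are
zero-extended from <8 x i32> and only the upper 32 bits of each 64-bit
product are kept. As a scalar sketch in C (illustration only, not part of
the commit; the helper name is made up):

    #include <stdint.h>

    /* Per-lane scalar model of the test below: widen each 32-bit input to
       64 bits, multiply, and keep only the upper 32 bits of the product. */
    static void mul_v8i64_zero_upper_scalar(const uint32_t a[8],
                                            const uint32_t b[8],
                                            uint32_t out[8]) {
      for (int i = 0; i < 8; ++i)
        out[i] = (uint32_t)(((uint64_t)a[i] * (uint64_t)b[i]) >> 32);
    }

In the IR at the end of the diff, the <16 x i32> shuffle picks elements
1,3,...,15, i.e. the high 32-bit half of each little-endian 64-bit lane,
which is what the >> 32 above models. The CHECK lines appear to be in the
utils/update_llc_test_checks.py auto-generated format.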

Modified:
    llvm/trunk/test/CodeGen/X86/pmul.ll

Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=287332&r1=287331&r2=287332&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Fri Nov 18 05:00:36 2016
@@ -1465,3 +1465,186 @@ entry:
   %res = shufflevector <8 x i32> %rescast, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   ret <4 x i32> %res
 }
+
+define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
+; SSE2-LABEL: mul_v8i64_zero_upper:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; SSE2-NEXT:    movdqa %xmm2, %xmm8
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT:    movdqa %xmm3, %xmm7
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm9
+; SSE2-NEXT:    pmuludq %xmm3, %xmm9
+; SSE2-NEXT:    movdqa %xmm3, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm6
+; SSE2-NEXT:    pmuludq %xmm1, %xmm6
+; SSE2-NEXT:    psllq $32, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    paddq %xmm6, %xmm1
+; SSE2-NEXT:    paddq %xmm9, %xmm1
+; SSE2-NEXT:    movdqa %xmm4, %xmm3
+; SSE2-NEXT:    pmuludq %xmm7, %xmm3
+; SSE2-NEXT:    movdqa %xmm7, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm6
+; SSE2-NEXT:    pmuludq %xmm4, %xmm6
+; SSE2-NEXT:    psllq $32, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm7, %xmm4
+; SSE2-NEXT:    psllq $32, %xmm4
+; SSE2-NEXT:    paddq %xmm6, %xmm4
+; SSE2-NEXT:    paddq %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    pmuludq %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm6
+; SSE2-NEXT:    pmuludq %xmm0, %xmm6
+; SSE2-NEXT:    psllq $32, %xmm6
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    paddq %xmm6, %xmm0
+; SSE2-NEXT:    paddq %xmm3, %xmm0
+; SSE2-NEXT:    movdqa %xmm5, %xmm2
+; SSE2-NEXT:    pmuludq %xmm8, %xmm2
+; SSE2-NEXT:    movdqa %xmm8, %xmm3
+; SSE2-NEXT:    psrlq $32, %xmm3
+; SSE2-NEXT:    pmuludq %xmm5, %xmm3
+; SSE2-NEXT:    psllq $32, %xmm3
+; SSE2-NEXT:    psrlq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm8, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    paddq %xmm3, %xmm5
+; SSE2-NEXT:    paddq %xmm2, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v8i64_zero_upper:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm8 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm9 = xmm3[0],zero,xmm3[1],zero
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    pmuludq %xmm3, %xmm6
+; SSE41-NEXT:    movdqa %xmm3, %xmm7
+; SSE41-NEXT:    psrlq $32, %xmm7
+; SSE41-NEXT:    pmuludq %xmm1, %xmm7
+; SSE41-NEXT:    psllq $32, %xmm7
+; SSE41-NEXT:    psrlq $32, %xmm1
+; SSE41-NEXT:    pmuludq %xmm3, %xmm1
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    paddq %xmm7, %xmm1
+; SSE41-NEXT:    paddq %xmm6, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
+; SSE41-NEXT:    psrlq $32, %xmm6
+; SSE41-NEXT:    pmuludq %xmm0, %xmm6
+; SSE41-NEXT:    psllq $32, %xmm6
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm2, %xmm0
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    paddq %xmm6, %xmm0
+; SSE41-NEXT:    paddq %xmm3, %xmm0
+; SSE41-NEXT:    movdqa %xmm4, %xmm2
+; SSE41-NEXT:    pmuludq %xmm9, %xmm2
+; SSE41-NEXT:    movdqa %xmm9, %xmm3
+; SSE41-NEXT:    psrlq $32, %xmm3
+; SSE41-NEXT:    pmuludq %xmm4, %xmm3
+; SSE41-NEXT:    psllq $32, %xmm3
+; SSE41-NEXT:    psrlq $32, %xmm4
+; SSE41-NEXT:    pmuludq %xmm9, %xmm4
+; SSE41-NEXT:    psllq $32, %xmm4
+; SSE41-NEXT:    paddq %xmm3, %xmm4
+; SSE41-NEXT:    paddq %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm5, %xmm2
+; SSE41-NEXT:    pmuludq %xmm8, %xmm2
+; SSE41-NEXT:    movdqa %xmm8, %xmm3
+; SSE41-NEXT:    psrlq $32, %xmm3
+; SSE41-NEXT:    pmuludq %xmm5, %xmm3
+; SSE41-NEXT:    psllq $32, %xmm3
+; SSE41-NEXT:    psrlq $32, %xmm5
+; SSE41-NEXT:    pmuludq %xmm8, %xmm5
+; SSE41-NEXT:    psllq $32, %xmm5
+; SSE41-NEXT:    paddq %xmm3, %xmm5
+; SSE41-NEXT:    paddq %xmm2, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,3,2,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX2-LABEL: mul_v8i64_zero_upper:
+; AVX2:       # BB#0: # %entry
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm4
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm5
+; AVX2-NEXT:    vpmuludq %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpsllq $32, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm4, %ymm2, %ymm4
+; AVX2-NEXT:    vpsllq $32, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX2-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: mul_v8i64_zero_upper:
+; AVX512:       # BB#0: # %entry
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512-NEXT:    retq
+entry:
+  %val1a = zext <8 x i32> %val1 to <8 x i64>
+  %val2a = zext <8 x i32> %val2 to <8 x i64>
+  %res64 = mul <8 x i64> %val1a, %val2a
+  %rescast = bitcast <8 x i64> %res64 to <16 x i32>
+  %res = shufflevector <16 x i32> %rescast, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  ret <8 x i32> %res
+}
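
A note on the generated code above: at this revision the SSE2, SSE41 and
AVX2 paths do not yet exploit the zeroed upper halves, so each vector i64
multiply is expanded into the generic pmuludq/psrlq/psllq/paddq sequence.
In scalar terms that expansion computes the following (a sketch assuming
unsigned 64-bit lanes; the helper name is made up):

    #include <stdint.h>

    /* 64x64->64 multiply built from 32x32->64 multiplies, mirroring the
       pmuludq-based sequences in the CHECK lines above. */
    static uint64_t mul64_from_pmuludq(uint64_t a, uint64_t b) {
      uint64_t lo_lo = (uint64_t)(uint32_t)a * (uint32_t)b; /* pmuludq        */
      uint64_t lo_hi = (uint64_t)(uint32_t)a * (b >> 32);   /* psrlq, pmuludq */
      uint64_t hi_lo = (a >> 32) * (uint64_t)(uint32_t)b;   /* psrlq, pmuludq */
      return lo_lo + (lo_hi << 32) + (hi_lo << 32);         /* psllq, paddq   */
    }

Because both inputs in this test are zero-extended from 32 bits, lo_hi and
hi_lo are always zero. The AVX512 output already collapses the whole
multiply to a single vpmuludq; presumably the improvement tracked by
PR30845 is to do the same for the narrower vector types.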