[llvm] r264507 - [X86][SSE] Refreshed vector integer multiply tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 26 02:35:48 PDT 2016


Author: rksimon
Date: Sat Mar 26 04:35:48 2016
New Revision: 264507

URL: http://llvm.org/viewvc/llvm-project?rev=264507&view=rev
Log:
[X86][SSE] Refreshed vector integer multiply tests

Added all 256-bit vector tests.
Added AVX512F/AVX512BW test targets.
Renamed tests to something more meaningful.
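
The AVX2/AVX512F/AVX512BW RUN lines share a common AVX check prefix, so codegen
that is identical across all AVX targets is only checked once. The CHECK lines
themselves are autogenerated; as a rough sketch, they can be refreshed after a
change like this with the update script named in the test header (the --llc
option name and $BUILD_DIR are assumptions, not taken from this commit):

  # Regenerate the FileCheck assertions for every RUN line in the test,
  # pointing the update script at a locally built llc.
  cd llvm/trunk
  python utils/update_llc_test_checks.py --llc=$BUILD_DIR/bin/llc \
      test/CodeGen/X86/pmul.ll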

Modified:
    llvm/trunk/test/CodeGen/X86/pmul.ll

Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=264507&r1=264506&r2=264507&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Sat Mar 26 04:35:48 2016
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
 
-define <16 x i8> @mul8c(<16 x i8> %i) nounwind  {
-; SSE2-LABEL: mul8c:
+define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind  {
+; SSE2-LABEL: mul_v16i8c:
 ; SSE2:       # BB#0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
 ; SSE2-NEXT:    psraw $8, %xmm1
@@ -21,7 +23,7 @@ define <16 x i8> @mul8c(<16 x i8> %i) no
 ; SSE2-NEXT:    packuswb %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
-; SSE41-LABEL: mul8c:
+; SSE41-LABEL: mul_v16i8c:
 ; SSE41:       # BB#0: # %entry
 ; SSE41-NEXT:    pmovsxbw %xmm0, %xmm1
 ; SSE41-NEXT:    pmovsxbw {{.*}}(%rip), %xmm2
@@ -36,7 +38,7 @@ define <16 x i8> @mul8c(<16 x i8> %i) no
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX2-LABEL: mul8c:
+; AVX2-LABEL: mul_v16i8c:
 ; AVX2:       # BB#0: # %entry
 ; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; AVX2-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
@@ -48,28 +50,45 @@ define <16 x i8> @mul8c(<16 x i8> %i) no
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: mul_v16i8c:
+; AVX512F:       # BB#0: # %entry
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512F-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: mul_v16i8c:
+; AVX512BW:       # BB#0: # %entry
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
+; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
 entry:
   %A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
   ret <16 x i8> %A
 }
 
-define <8 x i16> @mul16c(<8 x i16> %i) nounwind  {
-; SSE-LABEL: mul16c:
+define <8 x i16> @mul_v8i16c(<8 x i16> %i) nounwind  {
+; SSE-LABEL: mul_v8i16c:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: mul16c:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v8i16c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
   ret <8 x i16> %A
 }
 
-define <4 x i32> @a(<4 x i32> %i) nounwind  {
-; SSE2-LABEL: a:
+define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind  {
+; SSE2-LABEL: mul_v4i32c:
 ; SSE2:       # BB#0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [117,117,117,117]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
@@ -80,23 +99,23 @@ define <4 x i32> @a(<4 x i32> %i) nounwi
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
-; SSE41-LABEL: a:
+; SSE41-LABEL: mul_v4i32c:
 ; SSE41:       # BB#0: # %entry
 ; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX2-LABEL: a:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v4i32c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
   ret <4 x i32> %A
 }
 
-define <2 x i64> @b(<2 x i64> %i) nounwind  {
-; SSE-LABEL: b:
+define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind  {
+; SSE-LABEL: mul_v2i64c:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [117,117]
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -107,22 +126,22 @@ define <2 x i64> @b(<2 x i64> %i) nounwi
 ; SSE-NEXT:    paddq %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: b:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [117,117]
-; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v2i64c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [117,117]
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <2 x i64> %i, < i64 117, i64 117 >
   ret <2 x i64> %A
 }
 
-define <16 x i8> @mul8(<16 x i8> %i, <16 x i8> %j) nounwind  {
-; SSE2-LABEL: mul8:
+define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind  {
+; SSE2-LABEL: mul_v16i8:
 ; SSE2:       # BB#0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -142,7 +161,7 @@ define <16 x i8> @mul8(<16 x i8> %i, <16
 ; SSE2-NEXT:    packuswb %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
-; SSE41-LABEL: mul8:
+; SSE41-LABEL: mul_v16i8:
 ; SSE41:       # BB#0: # %entry
 ; SSE41-NEXT:    pmovsxbw %xmm1, %xmm3
 ; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
@@ -159,7 +178,7 @@ define <16 x i8> @mul8(<16 x i8> %i, <16
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX2-LABEL: mul8:
+; AVX2-LABEL: mul_v16i8:
 ; AVX2:       # BB#0: # %entry
 ; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
 ; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
@@ -171,28 +190,45 @@ define <16 x i8> @mul8(<16 x i8> %i, <16
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: mul_v16i8:
+; AVX512F:       # BB#0: # %entry
+; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: mul_v16i8:
+; AVX512BW:       # BB#0: # %entry
+; AVX512BW-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
 entry:
   %A = mul <16 x i8> %i, %j
   ret <16 x i8> %A
 }
 
-define <8 x i16> @mul16(<8 x i16> %i, <8 x i16> %j) nounwind  {
-; SSE-LABEL: mul16:
+define <8 x i16> @mul_v8i16(<8 x i16> %i, <8 x i16> %j) nounwind  {
+; SSE-LABEL: mul_v8i16:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    pmullw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: mul16:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v8i16:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <8 x i16> %i, %j
   ret <8 x i16> %A
 }
 
-define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind  {
-; SSE2-LABEL: c:
+define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind  {
+; SSE2-LABEL: mul_v4i32:
 ; SSE2:       # BB#0: # %entry
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE2-NEXT:    pmuludq %xmm1, %xmm0
@@ -203,22 +239,22 @@ define <4 x i32> @c(<4 x i32> %i, <4 x i
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
-; SSE41-LABEL: c:
+; SSE41-LABEL: mul_v4i32:
 ; SSE41:       # BB#0: # %entry
 ; SSE41-NEXT:    pmulld %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX2-LABEL: c:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v4i32:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <4 x i32> %i, %j
   ret <4 x i32> %A
 }
 
-define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind  {
-; SSE-LABEL: d:
+define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind  {
+; SSE-LABEL: mul_v2i64:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    pmuludq %xmm1, %xmm2
@@ -233,18 +269,18 @@ define <2 x i64> @d(<2 x i64> %i, <2 x i
 ; SSE-NEXT:    paddq %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: d:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpsrlq $32, %xmm1, %xmm3
-; AVX2-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT:    vpsllq $32, %xmm3, %xmm3
-; AVX2-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v2i64:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpsllq $32, %xmm3, %xmm3
+; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <2 x i64> %i, %j
   ret <2 x i64> %A
@@ -252,8 +288,8 @@ entry:
 
 declare void @foo()
 
-define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind  {
-; SSE2-LABEL: e:
+define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind  {
+; SSE2-LABEL: mul_v4i32spill:
 ; SSE2:       # BB#0: # %entry
 ; SSE2-NEXT:    subq $40, %rsp
 ; SSE2-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
@@ -271,7 +307,7 @@ define <4 x i32> @e(<4 x i32> %i, <4 x i
 ; SSE2-NEXT:    addq $40, %rsp
 ; SSE2-NEXT:    retq
 ;
-; SSE41-LABEL: e:
+; SSE41-LABEL: mul_v4i32spill:
 ; SSE41:       # BB#0: # %entry
 ; SSE41-NEXT:    subq $40, %rsp
 ; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
@@ -282,16 +318,16 @@ define <4 x i32> @e(<4 x i32> %i, <4 x i
 ; SSE41-NEXT:    addq $40, %rsp
 ; SSE41-NEXT:    retq
 ;
-; AVX2-LABEL: e:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    subq $40, %rsp
-; AVX2-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX2-NEXT:    callq foo
-; AVX2-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; AVX2-NEXT:    vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-NEXT:    addq $40, %rsp
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v4i32spill:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    subq $40, %rsp
+; AVX-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT:    callq foo
+; AVX-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    addq $40, %rsp
+; AVX-NEXT:    retq
 entry:
   ; Use a call to force spills.
   call void @foo()
@@ -299,8 +335,8 @@ entry:
   ret <4 x i32> %A
 }
 
-define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind  {
-; SSE-LABEL: f:
+define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind  {
+; SSE-LABEL: mul_v2i64spill:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    subq $40, %rsp
 ; SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
@@ -322,25 +358,25 @@ define <2 x i64> @f(<2 x i64> %i, <2 x i
 ; SSE-NEXT:    addq $40, %rsp
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: f:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    subq $40, %rsp
-; AVX2-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX2-NEXT:    callq foo
-; AVX2-NEXT:    vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; AVX2-NEXT:    vmovdqa (%rsp), %xmm3 # 16-byte Reload
-; AVX2-NEXT:    vpmuludq %xmm2, %xmm3, %xmm0
-; AVX2-NEXT:    vpsrlq $32, %xmm2, %xmm1
-; AVX2-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
-; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlq $32, %xmm3, %xmm1
-; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    addq $40, %rsp
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v2i64spill:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    subq $40, %rsp
+; AVX-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT:    callq foo
+; AVX-NEXT:    vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX-NEXT:    vmovdqa (%rsp), %xmm3 # 16-byte Reload
+; AVX-NEXT:    vpmuludq %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    vpsrlq $32, %xmm2, %xmm1
+; AVX-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm1
+; AVX-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    addq $40, %rsp
+; AVX-NEXT:    retq
 entry:
   ; Use a call to force spills.
   call void @foo()
@@ -348,8 +384,164 @@ entry:
   ret <2 x i64> %A
 }
 
-define <4 x i64> @b1(<4 x i64> %i) nounwind  {
-; SSE-LABEL: b1:
+define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind  {
+; SSE2-LABEL: mul_v32i8c:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
+; SSE2-NEXT:    psraw $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm3
+; SSE2-NEXT:    pmullw %xmm2, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm4, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    packuswb %xmm3, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm3
+; SSE2-NEXT:    pmullw %xmm2, %xmm3
+; SSE2-NEXT:    pand %xmm4, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    pmullw %xmm2, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    packuswb %xmm3, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v32i8c:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
+; SSE41-NEXT:    pmovsxbw {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmullw %xmm4, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm5, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT:    pmullw %xmm4, %xmm0
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    packuswb %xmm0, %xmm2
+; SSE41-NEXT:    pmovsxbw %xmm1, %xmm3
+; SSE41-NEXT:    pmullw %xmm4, %xmm3
+; SSE41-NEXT:    pand %xmm5, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT:    pmullw %xmm4, %xmm0
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    packuswb %xmm0, %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX2-LABEL: mul_v32i8c:
+; AVX2:       # BB#0: # %entry
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: mul_v32i8c:
+; AVX512F:       # BB#0: # %entry
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm1
+; AVX512F-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm2
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: mul_v32i8c:
+; AVX512BW:       # BB#0: # %entry
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm1
+; AVX512BW-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm2
+; AVX512BW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+entry:
+  %A = mul <32 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
+  ret <32 x i8> %A
+}
+
+define <16 x i16> @mul_v16i16c(<16 x i16> %i) nounwind  {
+; SSE-LABEL: mul_v16i16c:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
+; SSE-NEXT:    pmullw %xmm2, %xmm0
+; SSE-NEXT:    pmullw %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: mul_v16i16c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    retq
+entry:
+  %A = mul <16 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
+  ret <16 x i16> %A
+}
+
+define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind  {
+; SSE2-LABEL: mul_v8i32c:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v8i32c:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117]
+; SSE41-NEXT:    pmulld %xmm2, %xmm0
+; SSE41-NEXT:    pmulld %xmm2, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: mul_v8i32c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+entry:
+  %A = mul <8 x i32> %i, < i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117 >
+  ret <8 x i32> %A
+}
+
+define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind  {
+; SSE-LABEL: mul_v4i64c:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [117,117]
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
@@ -366,22 +558,195 @@ define <4 x i64> @b1(<4 x i64> %i) nounw
 ; SSE-NEXT:    paddq %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: b1:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
-; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v4i64c:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
+; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
+; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vpsllq $32, %ymm0, %ymm0
+; AVX-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 >
   ret <4 x i64> %A
 }
 
-define <4 x i64> @b2(<4 x i64> %i, <4 x i64> %j) nounwind  {
-; SSE-LABEL: b2:
+define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind  {
+; SSE2-LABEL: mul_v32i8:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm5
+; SSE2-NEXT:    pmullw %xmm4, %xmm5
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm2
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    packuswb %xmm5, %xmm0
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    psraw $8, %xmm5
+; SSE2-NEXT:    pmullw %xmm2, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    pmullw %xmm3, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    packuswb %xmm5, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v32i8:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    pmovsxbw %xmm2, %xmm5
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm4
+; SSE41-NEXT:    pmullw %xmm5, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm5, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT:    pmullw %xmm2, %xmm0
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    packuswb %xmm0, %xmm4
+; SSE41-NEXT:    pmovsxbw %xmm3, %xmm0
+; SSE41-NEXT:    pmovsxbw %xmm1, %xmm2
+; SSE41-NEXT:    pmullw %xmm0, %xmm2
+; SSE41-NEXT:    pand %xmm5, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    pand %xmm5, %xmm1
+; SSE41-NEXT:    packuswb %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX2-LABEL: mul_v32i8:
+; AVX2:       # BB#0: # %entry
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT:    vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: mul_v32i8:
+; AVX512F:       # BB#0: # %entry
+; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm2
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm3
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpmovsxwd %ymm2, %zmm2
+; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: mul_v32i8:
+; AVX512BW:       # BB#0: # %entry
+; AVX512BW-NEXT:    vpmovsxbw %xmm1, %ymm2
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm3
+; AVX512BW-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT:    vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-NEXT:    vpmovsxbw %xmm1, %ymm1
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX512BW-NEXT:    retq
+entry:
+  %A = mul <32 x i8> %i, %j
+  ret <32 x i8> %A
+}
+
+define <16 x i16> @mul_v16i16(<16 x i16> %i, <16 x i16> %j) nounwind  {
+; SSE-LABEL: mul_v16i16:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    pmullw %xmm2, %xmm0
+; SSE-NEXT:    pmullw %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: mul_v16i16:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+entry:
+  %A = mul <16 x i16> %i, %j
+  ret <16 x i16> %A
+}
+
+define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind  {
+; SSE2-LABEL: mul_v8i32:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v8i32:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    pmulld %xmm2, %xmm0
+; SSE41-NEXT:    pmulld %xmm3, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: mul_v8i32:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+entry:
+  %A = mul <8 x i32> %i, %j
+  ret <8 x i32> %A
+}
+
+define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind  {
+; SSE-LABEL: mul_v4i64:
 ; SSE:       # BB#0: # %entry
 ; SSE-NEXT:    movdqa %xmm0, %xmm4
 ; SSE-NEXT:    pmuludq %xmm2, %xmm4
@@ -407,20 +772,19 @@ define <4 x i64> @b2(<4 x i64> %i, <4 x
 ; SSE-NEXT:    paddq %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX2-LABEL: b2:
-; AVX2:       # BB#0: # %entry
-; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm3
-; AVX2-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
-; AVX2-NEXT:    vpsllq $32, %ymm3, %ymm3
-; AVX2-NEXT:    vpaddq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
-; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: mul_v4i64:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
+; AVX-NEXT:    vpsrlq $32, %ymm1, %ymm3
+; AVX-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
+; AVX-NEXT:    vpsllq $32, %ymm3, %ymm3
+; AVX-NEXT:    vpaddq %ymm3, %ymm2, %ymm2
+; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
+; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vpsllq $32, %ymm0, %ymm0
+; AVX-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
 entry:
   %A = mul <4 x i64> %i, %j
   ret <4 x i64> %A
 }
-