[llvm] 540ed35 - [X86] Add slow/fast pmulld test coverage to vector-mul.ll

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 27 13:54:03 PDT 2021


Author: Simon Pilgrim
Date: 2021-09-27T21:53:56+01:00
New Revision: 540ed354d3bce4290f0f8fbd23d6728727d2fa9f

URL: https://github.com/llvm/llvm-project/commit/540ed354d3bce4290f0f8fbd23d6728727d2fa9f
DIFF: https://github.com/llvm/llvm-project/commit/540ed354d3bce4290f0f8fbd23d6728727d2fa9f.diff

LOG: [X86] Add slow/fast pmulld test coverage to vector-mul.ll

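The slow-pmulld tuning flag models targets such as Silvermont where PMULLD
has poor throughput, so the backend may prefer to expand constant vector
multiplies into shift/add sequences. As a reduced sketch of the pattern this
file exercises (the function below already exists in the test; the RUN line
mirrors the new ones in this patch, and the body is reconstructed from the
checks):

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s
  define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
    ; value name is illustrative; the original uses autogenerated names
    %m = mul <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
    ret <4 x i32> %m
  }

At this commit the slow and fast pmulld runs share the same X64-SSE4 check
prefixes, i.e. they still produce identical pmulld code for these constant
multiplies; only the SSE2 runs expand them, e.g. pslld $4 + paddd for the
*17 case above.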
Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-mul.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 15cb162f029f..7dd5b4c960b9 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,X86-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,X64-SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2   | FileCheck %s --check-prefixes=SSE,SSE2,X86-SSE,X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X86-SSE,X86-SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,X64-SSE,X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,-slow-pmulld | FileCheck %s --check-prefixes=SSE,SSE4,X64-SSE,X64-SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s --check-prefixes=SSE,SSE4,X64-SSE,X64-SSE4
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop  | FileCheck %s --check-prefixes=X64-AVX,X64-XOP
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64-AVX,X64-AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X64-AVX,X64-AVX512DQ
@@ -89,13 +92,21 @@ define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
 ;
 
 define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
-; SSE-LABEL: mul_v2i64_32_8:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psllq $3, %xmm1
-; SSE-NEXT:    psllq $5, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    ret{{[l|q]}}
+; SSE2-LABEL: mul_v2i64_32_8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllq $5, %xmm1
+; SSE2-NEXT:    psllq $3, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: mul_v2i64_32_8:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    movdqa %xmm0, %xmm1
+; SSE4-NEXT:    psllq $3, %xmm1
+; SSE4-NEXT:    psllq $5, %xmm0
+; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE4-NEXT:    ret{{[l|q]}}
 ;
 ; X64-XOP-LABEL: mul_v2i64_32_8:
 ; X64-XOP:       # %bb.0:
@@ -116,15 +127,27 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_1_2_4_8:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_1_2_4_8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_1_2_4_8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_1_2_4_8:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_1_2_4_8:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
 ; X64-XOP:       # %bb.0:
@@ -174,19 +197,33 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
 }
 
 define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
-; SSE-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; SSE:       # %bb.0:
-; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8]
-; SSE-NEXT:    pmullw %xmm2, %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT:    pand %xmm3, %xmm0
-; SSE-NEXT:    pmullw %xmm2, %xmm1
-; SSE-NEXT:    pand %xmm3, %xmm1
-; SSE-NEXT:    packuswb %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+; SSE2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8]
+; SSE2-NEXT:    pmullw %xmm2, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8]
+; SSE4-NEXT:    pmullw %xmm2, %xmm0
+; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT:    pand %xmm3, %xmm0
+; SSE4-NEXT:    pmullw %xmm2, %xmm1
+; SSE4-NEXT:    pand %xmm3, %xmm1
+; SSE4-NEXT:    packuswb %xmm0, %xmm1
+; SSE4-NEXT:    movdqa %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
 ;
 ; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
 ; X64-XOP:       # %bb.0:
@@ -248,15 +285,23 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_17:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_17:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_17:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pslld $4, %xmm1
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_17:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_17:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_17:
 ; X64-XOP:       # %bb.0:
@@ -376,12 +421,24 @@ define <4 x i64> @mul_v4i64_17(<4 x i64> %a0) nounwind {
 }
 
 define <8 x i32> @mul_v8i32_17(<8 x i32> %a0) nounwind {
-; SSE-LABEL: mul_v8i32_17:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [17,17,17,17]
-; SSE-NEXT:    pmulld %xmm2, %xmm0
-; SSE-NEXT:    pmulld %xmm2, %xmm1
-; SSE-NEXT:    ret{{[l|q]}}
+; SSE2-LABEL: mul_v8i32_17:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pslld $4, %xmm2
+; SSE2-NEXT:    paddd %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pslld $4, %xmm3
+; SSE2-NEXT:    paddd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm3, %xmm1
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: mul_v8i32_17:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [17,17,17,17]
+; SSE4-NEXT:    pmulld %xmm2, %xmm0
+; SSE4-NEXT:    pmulld %xmm2, %xmm1
+; SSE4-NEXT:    ret{{[l|q]}}
 ;
 ; X64-XOP-LABEL: mul_v8i32_17:
 ; X64-XOP:       # %bb.0:
@@ -521,15 +578,24 @@ define <2 x i64> @mul_v2i64_neg1025(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_neg33:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_neg33:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_neg33:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pslld $5, %xmm1
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm0
+; SSE2-NEXT:    psubd %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_neg33:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_neg33:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_neg33:
 ; X64-XOP:       # %bb.0:
@@ -665,12 +731,27 @@ define <4 x i64> @mul_v4i64_neg1025(<4 x i64> %a0) nounwind {
 }
 
 define <8 x i32> @mul_v8i32_neg33(<8 x i32> %a0) nounwind {
-; SSE-LABEL: mul_v8i32_neg33:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967263,4294967263,4294967263,4294967263]
-; SSE-NEXT:    pmulld %xmm2, %xmm0
-; SSE-NEXT:    pmulld %xmm2, %xmm1
-; SSE-NEXT:    ret{{[l|q]}}
+; SSE2-LABEL: mul_v8i32_neg33:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    pslld $5, %xmm3
+; SSE2-NEXT:    paddd %xmm0, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pxor %xmm0, %xmm0
+; SSE2-NEXT:    psubd %xmm3, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pslld $5, %xmm3
+; SSE2-NEXT:    paddd %xmm1, %xmm3
+; SSE2-NEXT:    psubd %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: mul_v8i32_neg33:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [4294967263,4294967263,4294967263,4294967263]
+; SSE4-NEXT:    pmulld %xmm2, %xmm0
+; SSE4-NEXT:    pmulld %xmm2, %xmm1
+; SSE4-NEXT:    ret{{[l|q]}}
 ;
 ; X64-XOP-LABEL: mul_v8i32_neg33:
 ; X64-XOP:       # %bb.0:
@@ -843,15 +924,27 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_5_17_33_65:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_5_17_33_65:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_5_17_33_65:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [5,17,33,65]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_5_17_33_65:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_5_17_33_65:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
 ; X64-AVX:       # %bb.0:
@@ -881,31 +974,57 @@ define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
 }
 
 define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> %a0) nounwind {
-; X86-SSE-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X86-SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; X86-SSE-NEXT:    pand %xmm2, %xmm0
-; X86-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE-NEXT:    pand %xmm2, %xmm1
-; X86-SSE-NEXT:    packuswb %xmm0, %xmm1
-; X86-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; X64-SSE-NEXT:    pand %xmm2, %xmm0
-; X64-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-SSE-NEXT:    pand %xmm2, %xmm1
-; X64-SSE-NEXT:    packuswb %xmm0, %xmm1
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-SSE2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X86-SSE2-NEXT:    pand %xmm2, %xmm1
+; X86-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X86-SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand %xmm2, %xmm0
+; X86-SSE2-NEXT:    packuswb %xmm1, %xmm0
+; X86-SSE2-NEXT:    retl
+;
+; X86-SSE4-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-SSE4-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-SSE4-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X86-SSE4-NEXT:    pand %xmm2, %xmm0
+; X86-SSE4-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE4-NEXT:    pand %xmm2, %xmm1
+; X86-SSE4-NEXT:    packuswb %xmm0, %xmm1
+; X86-SSE4-NEXT:    movdqa %xmm1, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-SSE2-NEXT:    pand %xmm2, %xmm1
+; X64-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand %xmm2, %xmm0
+; X64-SSE2-NEXT:    packuswb %xmm1, %xmm0
+; X64-SSE2-NEXT:    retq
+;
+; X64-SSE4-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-SSE4-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-SSE4-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-SSE4-NEXT:    pand %xmm2, %xmm0
+; X64-SSE4-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE4-NEXT:    pand %xmm2, %xmm1
+; X64-SSE4-NEXT:    packuswb %xmm0, %xmm1
+; X64-SSE4-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
 ; X64-XOP:       # %bb.0:
@@ -972,15 +1091,23 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_7:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_7:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_7:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pslld $3, %xmm1
+; SSE2-NEXT:    psubd %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_7:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_7:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_7:
 ; X64-XOP:       # %bb.0:
@@ -1095,15 +1222,22 @@ define <2 x i64> @mul_v2i64_neg7(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_neg63:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_neg63:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_neg63:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pslld $6, %xmm1
+; SSE2-NEXT:    psubd %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_neg63:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_neg63:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_neg63:
 ; X64-XOP:       # %bb.0:
@@ -1361,11 +1495,24 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 }
 
 define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
-; SSE-LABEL: mul_v2i64_0_1:
-; SSE:       # %bb.0:
-; SSE-NEXT:    xorps %xmm1, %xmm1
-; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE-NEXT:    ret{{[l|q]}}
+; X86-SSE2-LABEL: mul_v2i64_0_1:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    xorpd %xmm1, %xmm1
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-SSE2-NEXT:    retl
+;
+; SSE4-LABEL: mul_v2i64_0_1:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    xorps %xmm1, %xmm1
+; SSE4-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE4-NEXT:    ret{{[l|q]}}
+;
+; X64-SSE2-LABEL: mul_v2i64_0_1:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    xorps %xmm1, %xmm1
+; X64-SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; X64-SSE2-NEXT:    movaps %xmm1, %xmm0
+; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v2i64_0_1:
 ; X64-AVX:       # %bb.0:
@@ -1501,15 +1648,27 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
 }
 
 define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
-; X86-SSE-LABEL: mul_v4i32_0_15_31_7:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i32_0_15_31_7:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE-NEXT:    retq
+; SSE2-LABEL: mul_v4i32_0_15_31_7:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,15,31,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; X86-SSE4-LABEL: mul_v4i32_0_15_31_7:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: mul_v4i32_0_15_31_7:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v4i32_0_15_31_7:
 ; X64-AVX:       # %bb.0:
@@ -1539,19 +1698,33 @@ define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
 }
 
 define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> %a0) nounwind {
-; SSE-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; SSE:       # %bb.0:
-; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
-; SSE-NEXT:    pmullw %xmm2, %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT:    pand %xmm3, %xmm0
-; SSE-NEXT:    pmullw %xmm2, %xmm1
-; SSE-NEXT:    pand %xmm3, %xmm1
-; SSE-NEXT:    packuswb %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+; SSE2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
+; SSE2-NEXT:    pmullw %xmm2, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
+; SSE4-NEXT:    pmullw %xmm2, %xmm0
+; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT:    pand %xmm3, %xmm0
+; SSE4-NEXT:    pmullw %xmm2, %xmm1
+; SSE4-NEXT:    pand %xmm3, %xmm1
+; SSE4-NEXT:    packuswb %xmm0, %xmm1
+; SSE4-NEXT:    movdqa %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
 ;
 ; X64-XOP-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
 ; X64-XOP:       # %bb.0:
@@ -1693,21 +1866,43 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
 ; multiply inputs is loop invariant.
 ; FIXME: We should be able to insert an AssertZExt for this.
 define <2 x i64> @mul_v2i64_zext_cross_bb(<2 x i32>* %in, <2 x i32>* %y) {
-; X86-SSE-LABEL: mul_v2i64_zext_cross_bb:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v2i64_zext_cross_bb:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmuludq %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-SSE2-LABEL: mul_v2i64_zext_cross_bb:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
+; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; X86-SSE2-NEXT:    retl
+;
+; X86-SSE4-LABEL: mul_v2i64_zext_cross_bb:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE4-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmuludq %xmm1, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE2-LABEL: mul_v2i64_zext_cross_bb:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
+; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; X64-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; X64-SSE2-NEXT:    retq
+;
+; X64-SSE4-LABEL: mul_v2i64_zext_cross_bb:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmuludq %xmm1, %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v2i64_zext_cross_bb:
 ; X64-AVX:       # %bb.0:
@@ -1727,27 +1922,57 @@ foo:
 }
 
 define <4 x i64> @mul_v4i64_zext_cross_bb(<4 x i32>* %in, <4 x i32>* %y) {
-; X86-SSE-LABEL: mul_v4i64_zext_cross_bb:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmuludq %xmm2, %xmm1
-; X86-SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-SSE-NEXT:    pmuludq %xmm2, %xmm0
-; X86-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: mul_v4i64_zext_cross_bb:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmuludq %xmm2, %xmm1
-; X64-SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X64-SSE-NEXT:    pmuludq %xmm2, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-SSE2-LABEL: mul_v4i64_zext_cross_bb:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movdqa (%ecx), %xmm0
+; X86-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE2-NEXT:    movdqa (%eax), %xmm2
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[2,1,3,3]
+; X86-SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; X86-SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; X86-SSE2-NEXT:    retl
+;
+; X86-SSE4-LABEL: mul_v4i64_zext_cross_bb:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE4-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; X86-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X86-SSE4-NEXT:    pmuludq %xmm2, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE2-LABEL: mul_v4i64_zext_cross_bb:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    movdqa (%rdi), %xmm0
+; X64-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X64-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE2-NEXT:    movdqa (%rsi), %xmm2
+; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[2,1,3,3]
+; X64-SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; X64-SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; X64-SSE2-NEXT:    retq
+;
+; X64-SSE4-LABEL: mul_v4i64_zext_cross_bb:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; X64-SSE4-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X64-SSE4-NEXT:    pmuludq %xmm2, %xmm0
+; X64-SSE4-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v4i64_zext_cross_bb:
 ; X64-XOP:       # %bb.0:

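The assertions above can be regenerated with utils/update_llc_test_checks.py,
as noted at the top of the test. A minimal sketch, assuming an llvm-project
checkout with a built llc on PATH:

  # run from the llvm-project root; add --llc-binary=<path> if llc is not on PATH
  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-mul.ll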