[llvm] r283672 - [X86][SSE] Regenerate and add 32-bit tests to widening tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 8 12:54:29 PDT 2016


Author: rksimon
Date: Sat Oct  8 14:54:28 2016
New Revision: 283672

URL: http://llvm.org/viewvc/llvm-project?rev=283672&view=rev
Log:
[X86][SSE] Regenerate and add 32-bit tests to widening tests

Modified:
    llvm/trunk/test/CodeGen/X86/WidenArith.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-5.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-6.ll
    llvm/trunk/test/CodeGen/X86/widen_conversions.ll
    llvm/trunk/test/CodeGen/X86/widen_load-0.ll
    llvm/trunk/test/CodeGen/X86/widen_load-2.ll
    llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll

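For context: the "NOTE: Assertions have been autogenerated" header in these tests marks them as maintained by a script rather than by hand, so the X86/X64 check blocks below were produced mechanically. A typical regeneration command (illustrative; it assumes the llvm tree is the current directory and a built llc is on PATH) is:

    utils/update_llc_test_checks.py test/CodeGen/X86/WidenArith.ll

After editing the RUN lines, rerunning the script rewrites every CHECK/X86/X64 line in the file to match the current llc output.
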
Modified: llvm/trunk/test/CodeGen/X86/WidenArith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/WidenArith.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/WidenArith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/WidenArith.ll Sat Oct  8 14:54:28 2016
@@ -1,17 +1,29 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
 
 define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
-; CHECK-LABEL: test:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm2
-; CHECK-NEXT:    vmulps %ymm0, %ymm1, %ymm1
-; CHECK-NEXT:    vsubps %ymm2, %ymm1, %ymm3
-; CHECK-NEXT:    vcmpltps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vcmpltps %ymm3, %ymm2, %ymm1
-; CHECK-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X86-LABEL: test:
+; X86:       # BB#0:
+; X86-NEXT:    vaddps %ymm1, %ymm0, %ymm2
+; X86-NEXT:    vmulps %ymm0, %ymm1, %ymm1
+; X86-NEXT:    vsubps %ymm2, %ymm1, %ymm3
+; X86-NEXT:    vcmpltps %ymm1, %ymm0, %ymm0
+; X86-NEXT:    vcmpltps %ymm3, %ymm2, %ymm1
+; X86-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X86-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test:
+; X64:       # BB#0:
+; X64-NEXT:    vaddps %ymm1, %ymm0, %ymm2
+; X64-NEXT:    vmulps %ymm0, %ymm1, %ymm1
+; X64-NEXT:    vsubps %ymm2, %ymm1, %ymm3
+; X64-NEXT:    vcmpltps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vcmpltps %ymm3, %ymm2, %ymm1
+; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    retq
  %c1 = fadd <8 x float> %a, %b
  %b1 = fmul <8 x float> %b, %a
  %d  = fsub <8 x float> %b1, %c1

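The X86 and X64 bodies above are identical except for the constant-pool addressing and the return instruction. As an aside (not something this commit does), FileCheck's --check-prefixes option could share the duplicated lines under a common prefix:

    ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64

leaving only the final vandps and the ret to be checked per target.
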
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-3.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-3.ll Sat Oct  8 14:54:28 2016
@@ -1,14 +1,25 @@
-; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; CHECK: paddd
-; CHECK: pextrd
-; CHECK: pextrd
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; bitcast v3i32 to v12i8
 
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin10.0.0d2"
-
 define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
+; X86-LABEL: convert:
+; X86:       ## BB#0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    paddd LCPI0_0, %xmm0
+; X86-NEXT:    pextrd $2, %xmm0, 8(%eax)
+; X86-NEXT:    pextrd $1, %xmm0, 4(%eax)
+; X86-NEXT:    movd %xmm0, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: convert:
+; X64:       ## BB#0: ## %entry
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    retq
 entry:
 	%add = add <3 x i32> %src, < i32 1, i32 1, i32 1 >		; <<3 x i32>> [#uses=1]
 	%conv = bitcast <3 x i32> %add to <12 x i8>		; <<12 x i8>> [#uses=1]

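The main divergence in convert above is how the <1,1,1> constant is addressed: 32-bit code uses an absolute constant-pool label (LCPI0_0 in Darwin syntax; the ELF-triple tests check {{\.LCPI.*}} instead), while x86-64 uses a RIP-relative reference:

    paddd LCPI0_0, %xmm0        ## i686: absolute constant-pool address
    paddd LCPI0_0(%rip), %xmm0  ## x86_64: RIP-relative address

The stores also differ: x86-64 covers elements 0 and 1 of the v3i32 with a single 8-byte movq, where the 32-bit output extracts them with separate pextrd/movd instructions.
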
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-5.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-5.ll Sat Oct  8 14:54:28 2016
@@ -1,9 +1,27 @@
-; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; CHECK: movl
-; CHECK: movq
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; bitcast an i64 to v2i32
+
 define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
+; X86-LABEL: convert:
+; X86:       ## BB#0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X86-NEXT:    pxor LCPI0_0, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: convert:
+; X64:       ## BB#0: ## %entry
+; X64-NEXT:    movd %rsi, %xmm0
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    retq
 entry:
 	%conv = bitcast i64 %src to <2 x i32>
 	%xor = xor <2 x i32> %conv, < i32 255, i32 32767 >

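The checks above show type promotion at work: <2 x i32> is not operated on directly, so the xor is performed on <2 x i64> elements, hence the pmovzxdq to widen, the pxor against a widened constant, and the pshufd that packs the low 32-bit halves back before the 8-byte store. Conceptually (an illustrative sketch, not IR from the test):

    %wide = zext <2 x i32> %conv to <2 x i64>
    %xorw = xor <2 x i64> %wide, <i64 255, i64 32767>
    %res  = trunc <2 x i64> %xorw to <2 x i32>
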
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-6.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-6.ll Sat Oct  8 14:54:28 2016
@@ -1,13 +1,21 @@
-; RUN: llc < %s -march=x86 -mattr=+sse4.1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; Test bit convert that requires widening in the operand.
 
 define i32 @return_v2hi() nounwind {
-; CHECK-LABEL: @return_v2hi
-; CHECK:      pushl
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: popl
-; CHECK-NEXT: ret
+; X86-LABEL: return_v2hi:
+; X86:       ## BB#0: ## %entry
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    popl %ecx
+; X86-NEXT:    retl
+;
+; X64-LABEL: return_v2hi:
+; X64:       ## BB#0: ## %entry
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
 entry:
 	%retval12 = bitcast <2 x i16> zeroinitializer to i32		; <i32> [#uses=1]
 	ret i32 %retval12

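Both outputs confirm that the bitcast of zeroinitializer constant-folds away; the function is equivalent to (illustrative):

    define i32 @return_v2hi() nounwind {
      ret i32 0
    }

The pushl %eax / popl %ecx pair in the 32-bit version is just stack-slot allocation and cleanup around the folded body. Note that the RUN lines also move from +sse4.1 to +sse4.2, matching the other tests in this commit.
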
Modified: llvm/trunk/test/CodeGen/X86/widen_conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conversions.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conversions.ll Sat Oct  8 14:54:28 2016
@@ -1,17 +1,24 @@
-; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-unknown"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=X64
 
 define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
-; CHECK-LABEL: zext_v4i8_to_v4i32:
-; 
-; CHECK:      movd (%{{.*}}), %[[X:xmm[0-9]+]]
-; CHECK-NEXT: pxor %[[Z:xmm[0-9]+]], %[[Z]]
-; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
-; CHECK-NEXT: punpcklwd %[[Z]], %[[X]]
-; CHECK-NEXT: ret
-
+; X86-LABEL: zext_v4i8_to_v4i32:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-NEXT:    retl
+;
+; X64-LABEL: zext_v4i8_to_v4i32:
+; X64:       # BB#0:
+; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT:    retq
   %val = load <4 x i8>, <4 x i8>* %ptr
   %ext = zext <4 x i8> %val to <4 x i32>
   ret <4 x i32> %ext

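Because the RUN lines only enable +sse2, the zero-extend is lowered as the classic two-step unpack against a zeroed register: bytes to words, then words to dwords. Under +sse4.1 or later the same extension collapses to a single instruction (illustrative, not checked by this test):

    pmovzxbd (%rdi), %xmm0   # zero-extend 4 bytes to 4 dwords in one step
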
Modified: llvm/trunk/test/CodeGen/X86/widen_load-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-0.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-0.ll Sat Oct  8 14:54:28 2016
@@ -1,14 +1,31 @@
-; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
+
 ; PR4891
 
 ; Both loads should happen before either store.
 
-; CHECK: movl  ({{.*}}), {{.*}}
-; CHECK: movl  ({{.*}}), {{.*}}
-; CHECK: movl  {{.*}}, ({{.*}})
-; CHECK: movl  {{.*}}, ({{.*}})
-
 define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
+; X86-LABEL: short2_int_swap:
+; X86:       # BB#0: # %entry
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %edx
+; X86-NEXT:    movl (%eax), %esi
+; X86-NEXT:    movl %edx, (%eax)
+; X86-NEXT:    movl %esi, (%ecx)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: short2_int_swap:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movl (%rsi), %eax
+; X64-NEXT:    movl (%rdi), %ecx
+; X64-NEXT:    movl %eax, (%rdi)
+; X64-NEXT:    movl %ecx, (%rsi)
+; X64-NEXT:    retq
 entry:
   %0 = load <2 x i16>, <2 x i16>* %b, align 2                ; <<2 x i16>> [#uses=1]
   %1 = load i32, i32* %c, align 4                      ; <i32> [#uses=1]

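The PR4891 hazard is aliasing: <2 x i16> and i32 are both 4 bytes wide, so %b and %c may point at the same word. The generated code therefore issues both loads before either store; an interleaved schedule would read a clobbered value when the pointers alias (illustrative, using the 32-bit registers from above):

    movl (%ecx), %edx    # load *%c
    movl %edx, (%eax)    # store to *%b -- if %eax == %ecx, *%c is now clobbered
    movl (%eax), %esi    # this load would see the swapped-in value, not the original
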
Modified: llvm/trunk/test/CodeGen/X86/widen_load-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-2.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-2.ll Sat Oct  8 14:54:28 2016
@@ -1,19 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; Test of load/store widening, based on pr5626
 ;
 
 %i32vec3 = type <3 x i32>
 define void @add3i32(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
-; CHECK-LABEL: add3i32:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    paddd (%rdx), %xmm0
-; CHECK-NEXT:    pextrd $2, %xmm0, 8(%rdi)
-; CHECK-NEXT:    movq %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add3i32:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa (%edx), %xmm0
+; X86-NEXT:    paddd (%ecx), %xmm0
+; X86-NEXT:    pextrd $2, %xmm0, 8(%eax)
+; X86-NEXT:    pextrd $1, %xmm0, 4(%eax)
+; X86-NEXT:    movd %xmm0, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add3i32:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    paddd (%rdx), %xmm0
+; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i32vec3, %i32vec3* %ap, align 16
 	%b = load %i32vec3, %i32vec3* %bp, align 16
 	%x = add %i32vec3 %a, %b
@@ -22,17 +35,34 @@ define void @add3i32(%i32vec3*  sret %re
 }
 
 define void @add3i32_2(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
-; CHECK-LABEL: add3i32_2:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    pinsrd $2, 8(%rsi), %xmm0
-; CHECK-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT:    pinsrd $2, 8(%rdx), %xmm1
-; CHECK-NEXT:    paddd %xmm0, %xmm1
-; CHECK-NEXT:    pextrd $2, %xmm1, 8(%rdi)
-; CHECK-NEXT:    movq %xmm1, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add3i32_2:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrd $1, 4(%edx), %xmm0
+; X86-NEXT:    pinsrd $2, 8(%edx), %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrd $1, 4(%ecx), %xmm1
+; X86-NEXT:    pinsrd $2, 8(%ecx), %xmm1
+; X86-NEXT:    paddd %xmm0, %xmm1
+; X86-NEXT:    pextrd $2, %xmm1, 8(%eax)
+; X86-NEXT:    pextrd $1, %xmm1, 4(%eax)
+; X86-NEXT:    movd %xmm1, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add3i32_2:
+; X64:       # BB#0:
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    pinsrd $2, 8(%rsi), %xmm0
+; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT:    pinsrd $2, 8(%rdx), %xmm1
+; X64-NEXT:    paddd %xmm0, %xmm1
+; X64-NEXT:    pextrd $2, %xmm1, 8(%rdi)
+; X64-NEXT:    movq %xmm1, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i32vec3, %i32vec3* %ap, align 8
 	%b = load %i32vec3, %i32vec3* %bp, align 8
 	%x = add %i32vec3 %a, %b
@@ -42,17 +72,32 @@ define void @add3i32_2(%i32vec3*  sret %
 
 %i32vec7 = type <7 x i32>
 define void @add7i32(%i32vec7*  sret %ret, %i32vec7* %ap, %i32vec7* %bp)  {
-; CHECK-LABEL: add7i32:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    movdqa 16(%rsi), %xmm1
-; CHECK-NEXT:    paddd (%rdx), %xmm0
-; CHECK-NEXT:    paddd 16(%rdx), %xmm1
-; CHECK-NEXT:    pextrd $2, %xmm1, 24(%rdi)
-; CHECK-NEXT:    movq %xmm1, 16(%rdi)
-; CHECK-NEXT:    movdqa %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add7i32:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa (%edx), %xmm0
+; X86-NEXT:    movdqa 16(%edx), %xmm1
+; X86-NEXT:    paddd (%ecx), %xmm0
+; X86-NEXT:    paddd 16(%ecx), %xmm1
+; X86-NEXT:    pextrd $2, %xmm1, 24(%eax)
+; X86-NEXT:    pextrd $1, %xmm1, 20(%eax)
+; X86-NEXT:    movd %xmm1, 16(%eax)
+; X86-NEXT:    movdqa %xmm0, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add7i32:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-NEXT:    paddd (%rdx), %xmm0
+; X64-NEXT:    paddd 16(%rdx), %xmm1
+; X64-NEXT:    pextrd $2, %xmm1, 24(%rdi)
+; X64-NEXT:    movq %xmm1, 16(%rdi)
+; X64-NEXT:    movdqa %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i32vec7, %i32vec7* %ap, align 16
 	%b = load %i32vec7, %i32vec7* %bp, align 16
 	%x = add %i32vec7 %a, %b
@@ -62,19 +107,35 @@ define void @add7i32(%i32vec7*  sret %re
 
 %i32vec12 = type <12 x i32>
 define void @add12i32(%i32vec12*  sret %ret, %i32vec12* %ap, %i32vec12* %bp)  {
-; CHECK-LABEL: add12i32:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    movdqa 16(%rsi), %xmm1
-; CHECK-NEXT:    movdqa 32(%rsi), %xmm2
-; CHECK-NEXT:    paddd (%rdx), %xmm0
-; CHECK-NEXT:    paddd 16(%rdx), %xmm1
-; CHECK-NEXT:    paddd 32(%rdx), %xmm2
-; CHECK-NEXT:    movdqa %xmm2, 32(%rdi)
-; CHECK-NEXT:    movdqa %xmm1, 16(%rdi)
-; CHECK-NEXT:    movdqa %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add12i32:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa 32(%edx), %xmm0
+; X86-NEXT:    movdqa (%edx), %xmm1
+; X86-NEXT:    movdqa 16(%edx), %xmm2
+; X86-NEXT:    paddd (%ecx), %xmm1
+; X86-NEXT:    paddd 16(%ecx), %xmm2
+; X86-NEXT:    paddd 32(%ecx), %xmm0
+; X86-NEXT:    movdqa %xmm0, 32(%eax)
+; X86-NEXT:    movdqa %xmm2, 16(%eax)
+; X86-NEXT:    movdqa %xmm1, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add12i32:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-NEXT:    movdqa 32(%rsi), %xmm2
+; X64-NEXT:    paddd (%rdx), %xmm0
+; X64-NEXT:    paddd 16(%rdx), %xmm1
+; X64-NEXT:    paddd 32(%rdx), %xmm2
+; X64-NEXT:    movdqa %xmm2, 32(%rdi)
+; X64-NEXT:    movdqa %xmm1, 16(%rdi)
+; X64-NEXT:    movdqa %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i32vec12, %i32vec12* %ap, align 16
 	%b = load %i32vec12, %i32vec12* %bp, align 16
 	%x = add %i32vec12 %a, %b
@@ -85,17 +146,41 @@ define void @add12i32(%i32vec12*  sret %
 
 %i16vec3 = type <3 x i16>
 define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
-; CHECK-LABEL: add3i16:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; CHECK-NEXT:    paddd %xmm0, %xmm1
-; CHECK-NEXT:    pextrw $4, %xmm1, 4(%rdi)
-; CHECK-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; CHECK-NEXT:    movd %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add3i16:
+; X86:       # BB#0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $24, %esp
+; X86-NEXT:    movl 8(%ebp), %eax
+; X86-NEXT:    movl 16(%ebp), %ecx
+; X86-NEXT:    movl 12(%ebp), %edx
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X86-NEXT:    pinsrd $2, 4(%edx), %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; X86-NEXT:    pinsrd $2, 4(%ecx), %xmm1
+; X86-NEXT:    paddd %xmm0, %xmm1
+; X86-NEXT:    pextrw $4, %xmm1, 4(%eax)
+; X86-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; X86-NEXT:    movd %xmm0, (%eax)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add3i16:
+; X64:       # BB#0:
+; X64-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-NEXT:    paddd %xmm0, %xmm1
+; X64-NEXT:    pextrw $4, %xmm1, 4(%rdi)
+; X64-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; X64-NEXT:    movd %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i16vec3, %i16vec3* %ap, align 16
 	%b = load %i16vec3, %i16vec3* %bp, align 16
 	%x = add %i16vec3 %a, %b
@@ -105,14 +190,25 @@ define void @add3i16(%i16vec3* nocapture
 
 %i16vec4 = type <4 x i16>
 define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
-; CHECK-LABEL: add4i16:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT:    paddw %xmm0, %xmm1
-; CHECK-NEXT:    movq %xmm1, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add4i16:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddw %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add4i16:
+; X64:       # BB#0:
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT:    paddw %xmm0, %xmm1
+; X64-NEXT:    movq %xmm1, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i16vec4, %i16vec4* %ap, align 16
 	%b = load %i16vec4, %i16vec4* %bp, align 16
 	%x = add %i16vec4 %a, %b
@@ -122,16 +218,30 @@ define void @add4i16(%i16vec4* nocapture
 
 %i16vec12 = type <12 x i16>
 define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
-; CHECK-LABEL: add12i16:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    movdqa 16(%rsi), %xmm1
-; CHECK-NEXT:    paddw (%rdx), %xmm0
-; CHECK-NEXT:    paddw 16(%rdx), %xmm1
-; CHECK-NEXT:    movq %xmm1, 16(%rdi)
-; CHECK-NEXT:    movdqa %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add12i16:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa (%edx), %xmm0
+; X86-NEXT:    movdqa 16(%edx), %xmm1
+; X86-NEXT:    paddw (%ecx), %xmm0
+; X86-NEXT:    paddw 16(%ecx), %xmm1
+; X86-NEXT:    pextrd $1, %xmm1, 20(%eax)
+; X86-NEXT:    movd %xmm1, 16(%eax)
+; X86-NEXT:    movdqa %xmm0, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add12i16:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-NEXT:    paddw (%rdx), %xmm0
+; X64-NEXT:    paddw 16(%rdx), %xmm1
+; X64-NEXT:    movq %xmm1, 16(%rdi)
+; X64-NEXT:    movdqa %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i16vec12, %i16vec12* %ap, align 16
 	%b = load %i16vec12, %i16vec12* %bp, align 16
 	%x = add %i16vec12 %a, %b
@@ -141,19 +251,35 @@ define void @add12i16(%i16vec12* nocaptu
 
 %i16vec18 = type <18 x i16>
 define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
-; CHECK-LABEL: add18i16:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    movdqa 16(%rsi), %xmm1
-; CHECK-NEXT:    movdqa 32(%rsi), %xmm2
-; CHECK-NEXT:    paddw (%rdx), %xmm0
-; CHECK-NEXT:    paddw 16(%rdx), %xmm1
-; CHECK-NEXT:    paddw 32(%rdx), %xmm2
-; CHECK-NEXT:    movd %xmm2, 32(%rdi)
-; CHECK-NEXT:    movdqa %xmm1, 16(%rdi)
-; CHECK-NEXT:    movdqa %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add18i16:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa 32(%edx), %xmm0
+; X86-NEXT:    movdqa (%edx), %xmm1
+; X86-NEXT:    movdqa 16(%edx), %xmm2
+; X86-NEXT:    paddw (%ecx), %xmm1
+; X86-NEXT:    paddw 16(%ecx), %xmm2
+; X86-NEXT:    paddw 32(%ecx), %xmm0
+; X86-NEXT:    movd %xmm0, 32(%eax)
+; X86-NEXT:    movdqa %xmm2, 16(%eax)
+; X86-NEXT:    movdqa %xmm1, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add18i16:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-NEXT:    movdqa 32(%rsi), %xmm2
+; X64-NEXT:    paddw (%rdx), %xmm0
+; X64-NEXT:    paddw 16(%rdx), %xmm1
+; X64-NEXT:    paddw 32(%rdx), %xmm2
+; X64-NEXT:    movd %xmm2, 32(%rdi)
+; X64-NEXT:    movdqa %xmm1, 16(%rdi)
+; X64-NEXT:    movdqa %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i16vec18, %i16vec18* %ap, align 16
 	%b = load %i16vec18, %i16vec18* %bp, align 16
 	%x = add %i16vec18 %a, %b
@@ -164,17 +290,33 @@ define void @add18i16(%i16vec18* nocaptu
 
 %i8vec3 = type <3 x i8>
 define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
-; CHECK-LABEL: add3i8:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT:    paddd %xmm0, %xmm1
-; CHECK-NEXT:    pextrb $8, %xmm1, 2(%rdi)
-; CHECK-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; CHECK-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; CHECK-NEXT:    pextrw $0, %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add3i8:
+; X86:       # BB#0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X86-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X86-NEXT:    paddd %xmm0, %xmm1
+; X86-NEXT:    pextrb $8, %xmm1, 2(%eax)
+; X86-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; X86-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X86-NEXT:    pextrw $0, %xmm0, (%eax)
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add3i8:
+; X64:       # BB#0:
+; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X64-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X64-NEXT:    paddd %xmm0, %xmm1
+; X64-NEXT:    pextrb $8, %xmm1, 2(%rdi)
+; X64-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X64-NEXT:    pextrw $0, %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i8vec3, %i8vec3* %ap, align 16
 	%b = load %i8vec3, %i8vec3* %bp, align 16
 	%x = add %i8vec3 %a, %b
@@ -184,19 +326,36 @@ define void @add3i8(%i8vec3* nocapture s
 
 %i8vec31 = type <31 x i8>
 define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
-; CHECK-LABEL: add31i8:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa (%rsi), %xmm0
-; CHECK-NEXT:    movdqa 16(%rsi), %xmm1
-; CHECK-NEXT:    paddb (%rdx), %xmm0
-; CHECK-NEXT:    paddb 16(%rdx), %xmm1
-; CHECK-NEXT:    pextrb $14, %xmm1, 30(%rdi)
-; CHECK-NEXT:    pextrw $6, %xmm1, 28(%rdi)
-; CHECK-NEXT:    pextrd $2, %xmm1, 24(%rdi)
-; CHECK-NEXT:    movq %xmm1, 16(%rdi)
-; CHECK-NEXT:    movdqa %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add31i8:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa (%edx), %xmm0
+; X86-NEXT:    movdqa 16(%edx), %xmm1
+; X86-NEXT:    paddb (%ecx), %xmm0
+; X86-NEXT:    paddb 16(%ecx), %xmm1
+; X86-NEXT:    pextrb $14, %xmm1, 30(%eax)
+; X86-NEXT:    pextrw $6, %xmm1, 28(%eax)
+; X86-NEXT:    pextrd $2, %xmm1, 24(%eax)
+; X86-NEXT:    pextrd $1, %xmm1, 20(%eax)
+; X86-NEXT:    movd %xmm1, 16(%eax)
+; X86-NEXT:    movdqa %xmm0, (%eax)
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: add31i8:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa (%rsi), %xmm0
+; X64-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-NEXT:    paddb (%rdx), %xmm0
+; X64-NEXT:    paddb 16(%rdx), %xmm1
+; X64-NEXT:    pextrb $14, %xmm1, 30(%rdi)
+; X64-NEXT:    pextrw $6, %xmm1, 28(%rdi)
+; X64-NEXT:    pextrd $2, %xmm1, 24(%rdi)
+; X64-NEXT:    movq %xmm1, 16(%rdi)
+; X64-NEXT:    movdqa %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%a = load %i8vec31, %i8vec31* %ap, align 16
 	%b = load %i8vec31, %i8vec31* %bp, align 16
 	%x = add %i8vec31 %a, %b
@@ -207,29 +366,57 @@ define void @add31i8(%i8vec31* nocapture
 
 %i8vec3pack = type { <3 x i8>, i8 }
 define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pack* %rot) nounwind {
-; CHECK-LABEL: rot:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = <0,4,8,128,u,u,u,u,u,u,u,u,u,u,u,u>
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = <158,158,158,u>
-; CHECK-NEXT:    pshufb %xmm0, %xmm1
-; CHECK-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; CHECK-NEXT:    pextrw $0, %xmm1, (%rsi)
-; CHECK-NEXT:    movb $-98, 2(%rsi)
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = <1,1,1,u>
-; CHECK-NEXT:    pshufb %xmm0, %xmm1
-; CHECK-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; CHECK-NEXT:    pextrw $0, %xmm0, (%rdx)
-; CHECK-NEXT:    movb $1, 2(%rdx)
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT:    movdqa %xmm0, %xmm1
-; CHECK-NEXT:    psrld $1, %xmm1
-; CHECK-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; CHECK-NEXT:    pextrb $8, %xmm1, 2(%rdi)
-; CHECK-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; CHECK-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; CHECK-NEXT:    pextrw $0, %xmm0, (%rdi)
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: rot:
+; X86:       # BB#0: # %entry
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = <0,4,8,128,u,u,u,u,u,u,u,u,u,u,u,u>
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = <158,158,158,u>
+; X86-NEXT:    pshufb %xmm0, %xmm1
+; X86-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X86-NEXT:    pextrw $0, %xmm1, (%edx)
+; X86-NEXT:    movb $-98, 2(%edx)
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = <1,1,1,u>
+; X86-NEXT:    pshufb %xmm0, %xmm1
+; X86-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X86-NEXT:    pextrw $0, %xmm0, (%ecx)
+; X86-NEXT:    movb $1, 2(%ecx)
+; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $1, %xmm1
+; X86-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X86-NEXT:    pextrb $8, %xmm1, 2(%eax)
+; X86-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; X86-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X86-NEXT:    pextrw $0, %xmm0, (%eax)
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: rot:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa {{.*#+}} xmm0 = <0,4,8,128,u,u,u,u,u,u,u,u,u,u,u,u>
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = <158,158,158,u>
+; X64-NEXT:    pshufb %xmm0, %xmm1
+; X64-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X64-NEXT:    pextrw $0, %xmm1, (%rsi)
+; X64-NEXT:    movb $-98, 2(%rsi)
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = <1,1,1,u>
+; X64-NEXT:    pshufb %xmm0, %xmm1
+; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X64-NEXT:    pextrw $0, %xmm0, (%rdx)
+; X64-NEXT:    movb $1, 2(%rdx)
+; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrld $1, %xmm1
+; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X64-NEXT:    pextrb $8, %xmm1, 2(%rdi)
+; X64-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X64-NEXT:    pextrw $0, %xmm0, (%rdi)
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 entry:
   %storetmp = bitcast %i8vec3pack* %X to <3 x i8>*
   store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp

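A recurring pattern in the widen_load-2 checks is the sret return convention: on i686 the callee pops the 4-byte hidden sret pointer on return, hence the retl $4 epilogues, while the x86-64 SysV convention instead returns the sret pointer in %rax:

    retl $4              # i686: pop the hidden sret argument on return

    movq %rdi, %rax      # x86-64: sret pointer is also the return value
    retq
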
Modified: llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll?rev=283672&r1=283671&r2=283672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll Sat Oct  8 14:54:28 2016
@@ -1,18 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-
-target triple = "x86_64-unknown-unknown"
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; widening shuffle v3float and then an add
 define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-; CHECK-LABEL: shuf:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    addps %xmm1, %xmm0
-; CHECK-NEXT:    extractps $2, %xmm0, 8(%eax)
-; CHECK-NEXT:    extractps $1, %xmm0, 4(%eax)
-; CHECK-NEXT:    movss %xmm0, (%eax)
-; CHECK-NEXT:    retl
+; X86-LABEL: shuf:
+; X86:       # BB#0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addps %xmm1, %xmm0
+; X86-NEXT:    extractps $2, %xmm0, 8(%eax)
+; X86-NEXT:    extractps $1, %xmm0, 4(%eax)
+; X86-NEXT:    movss %xmm0, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: shuf:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    addps %xmm1, %xmm0
+; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
+; X64-NEXT:    movlps %xmm0, (%rdi)
+; X64-NEXT:    retq
 entry:
 	%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
 	%val = fadd <3 x float> %x, %src2
@@ -23,15 +29,23 @@ entry:
 
 ; widening shuffle v3float with a different mask and then an add
 define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-; CHECK-LABEL: shuf2:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; CHECK-NEXT:    addps %xmm1, %xmm0
-; CHECK-NEXT:    extractps $2, %xmm0, 8(%eax)
-; CHECK-NEXT:    extractps $1, %xmm0, 4(%eax)
-; CHECK-NEXT:    movss %xmm0, (%eax)
-; CHECK-NEXT:    retl
+; X86-LABEL: shuf2:
+; X86:       # BB#0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X86-NEXT:    addps %xmm1, %xmm0
+; X86-NEXT:    extractps $2, %xmm0, 8(%eax)
+; X86-NEXT:    extractps $1, %xmm0, 4(%eax)
+; X86-NEXT:    movss %xmm0, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: shuf2:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X64-NEXT:    addps %xmm1, %xmm0
+; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
+; X64-NEXT:    movlps %xmm0, (%rdi)
+; X64-NEXT:    retq
 entry:
 	%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
 	%val = fadd <3 x float> %x, %src2
@@ -43,12 +57,18 @@ entry:
 ; with the operation that we are currently widening, i.e. when replacing
 ; opA with opB, the DAG will produce new operations with opA.
 define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
-; CHECK-LABEL: shuf3:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; CHECK-NEXT:    movaps %xmm1, (%eax)
-; CHECK-NEXT:    retl
+; X86-LABEL: shuf3:
+; X86:       # BB#0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X86-NEXT:    movaps %xmm1, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: shuf3:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X64-NEXT:    movaps %xmm1, (%rdi)
+; X64-NEXT:    retq
 entry:
   %shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
@@ -67,26 +87,41 @@ entry:
 
 ; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
 define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
-; CHECK-LABEL: shuf4:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; CHECK-NEXT:    pshufb %xmm2, %xmm1
-; CHECK-NEXT:    pshufb %xmm2, %xmm0
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT:    retl
+; X86-LABEL: shuf4:
+; X86:       # BB#0:
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X86-NEXT:    pshufb %xmm2, %xmm1
+; X86-NEXT:    pshufb %xmm2, %xmm0
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-NEXT:    retl
+;
+; X64-LABEL: shuf4:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X64-NEXT:    pshufb %xmm2, %xmm1
+; X64-NEXT:    pshufb %xmm2, %xmm0
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    retq
   %vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %vshuf
 }
 
 ; PR11389: another CONCAT_VECTORS case
 define void @shuf5(<8 x i8>* %p) nounwind {
-; CHECK-LABEL: shuf5:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
-; CHECK-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; CHECK-NEXT:    movq %xmm0, (%eax)
-; CHECK-NEXT:    retl
+; X86-LABEL: shuf5:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
+; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: shuf5:
+; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
+; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    retq
   %v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   store <8 x i8> %v, <8 x i8>* %p, align 8
   ret void
