[llvm] r227130 - [x86][MMX] Rename and cleanup tests: arith, intrinsics and shuffle

Bruno Cardoso Lopes bruno.cardoso at gmail.com
Mon Jan 26 12:06:52 PST 2015


Author: bruno
Date: Mon Jan 26 14:06:51 2015
New Revision: 227130

URL: http://llvm.org/viewvc/llvm-project?rev=227130&view=rev
Log:
[x86][MMX] Rename and cleanup tests: arith, intrinsics and shuffle

- Rename mmx-builtins.ll to mmx-intrinsics.ll to match the naming of the
  other intrinsics tests.
- Remove tests whose functionality is already covered by mmx-intrinsics.ll.
- Move arithmetic-related tests into mmx-arith.ll.
- Move MMX-related shuffle tests into vector-shuffle-mmx.ll.
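
The consolidated mmx-arith.ll now covers 32-bit and 64-bit codegen from a
single file by giving each RUN line its own FileCheck prefix:

  ; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
  ; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s

Checks that apply to only one target use the matching X32:/X64: prefix, and
functions exercised on both targets carry both X32-LABEL and X64-LABEL lines.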

Added:
    llvm/trunk/test/CodeGen/X86/mmx-intrinsics.ll
      - copied, changed from r227121, llvm/trunk/test/CodeGen/X86/mmx-builtins.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll
Removed:
    llvm/trunk/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
    llvm/trunk/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
    llvm/trunk/test/CodeGen/X86/mmx-builtins.ll
    llvm/trunk/test/CodeGen/X86/mmx-punpckhdq.ll
    llvm/trunk/test/CodeGen/X86/mmx-shift.ll
    llvm/trunk/test/CodeGen/X86/mmx-shuffle.ll
Modified:
    llvm/trunk/test/CodeGen/X86/mmx-arith.ll
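
To exercise just the affected tests after this change, they can be run
individually through lit (paths below assume a built tree and are
illustrative):

    ./build/bin/llvm-lit -v llvm/test/CodeGen/X86/mmx-arith.ll \
        llvm/test/CodeGen/X86/mmx-intrinsics.ll \
        llvm/test/CodeGen/X86/vector-shuffle-mmx.ll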

Removed: llvm/trunk/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll (removed)
@@ -1,64 +0,0 @@
-; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s
-; There are no MMX instructions here.  We use add+adcl for the adds.
-
-define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
-entry:
-	%tmp2942 = icmp eq i32 %count, 0		; <i1> [#uses=1]
-	br i1 %tmp2942, label %bb31, label %bb26
-
-bb26:		; preds = %bb26, %entry
-
-; CHECK:  addl
-; CHECK:  adcl
-
-	%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]		; <i32> [#uses=3]
-	%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]		; <<1 x i64>> [#uses=1]
-	%tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0		; <<1 x i64>*> [#uses=1]
-	%tmp14 = load <1 x i64>* %tmp13		; <<1 x i64>> [#uses=1]
-	%tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0		; <<1 x i64>*> [#uses=1]
-	%tmp19 = load <1 x i64>* %tmp18		; <<1 x i64>> [#uses=1]
-	%tmp21 = add <1 x i64> %tmp19, %tmp14		; <<1 x i64>> [#uses=1]
-	%tmp22 = add <1 x i64> %tmp21, %sum.035.0		; <<1 x i64>> [#uses=2]
-	%tmp25 = add i32 %i.037.0, 1		; <i32> [#uses=2]
-	%tmp29 = icmp ult i32 %tmp25, %count		; <i1> [#uses=1]
-	br i1 %tmp29, label %bb26, label %bb31
-
-bb31:		; preds = %bb26, %entry
-	%sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]		; <<1 x i64>> [#uses=1]
-	ret <1 x i64> %sum.035.1
-}
-
-
-; This is the original test converted to use MMX intrinsics.
-
-define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
-entry:
-        %tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
-	%tmp2942 = icmp eq i32 %count, 0		; <i1> [#uses=1]
-	br i1 %tmp2942, label %bb31, label %bb26
-
-bb26:		; preds = %bb26, %entry
-
-; CHECK:  movq	({{.*}},8), %mm
-; CHECK:  paddq	({{.*}},8), %mm
-; CHECK:  paddq	%mm{{[0-7]}}, %mm
-
-	%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]		; <i32> [#uses=3]
-	%sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ]		; <x86_mmx> [#uses=1]
-	%tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0		; <x86_mmx*> [#uses=1]
-	%tmp14 = load x86_mmx* %tmp13		; <x86_mmx> [#uses=1]
-	%tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0		; <x86_mmx*> [#uses=1]
-	%tmp19 = load x86_mmx* %tmp18		; <x86_mmx> [#uses=1]
-	%tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14)		; <x86_mmx> [#uses=1]
-	%tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0)		; <x86_mmx> [#uses=2]
-	%tmp25 = add i32 %i.037.0, 1		; <i32> [#uses=2]
-	%tmp29 = icmp ult i32 %tmp25, %count		; <i1> [#uses=1]
-	br i1 %tmp29, label %bb26, label %bb31
-
-bb31:		; preds = %bb26, %entry
-	%sum.035.1 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ]		; <x86_mmx> [#uses=1]
-        %t = bitcast x86_mmx %sum.035.1 to <1 x i64>
-	ret <1 x i64> %t
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
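
A note on the removed test: unsigned_add3 deliberately contains no MMX
operations. A plain IR add on <1 x i64> is legalized on 32-bit x86 into a
32-bit add plus an add-with-carry, which is what the addl/adcl checks pin
down; roughly (illustrative only, exact registers and addressing depend on
allocation):

    addl  (%eax,%ecx,8), %esi
    adcl  4(%eax,%ecx,8), %edi

The intrinsic variant (unsigned_add3a) must instead stay on the MMX unit,
hence the movq/paddq checks. The IR half survives as test3 in mmx-arith.ll
below; per the log above, the intrinsic half duplicates coverage already in
mmx-intrinsics.ll.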

Removed: llvm/trunk/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll (removed)
@@ -1,100 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
-; There are no MMX operations here, so we use XMM or i64.
-
-; CHECK: ti8
-define void @ti8(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to <8 x i8>
-        %tmp2 = bitcast double %b to <8 x i8>
-        %tmp3 = add <8 x i8> %tmp1, %tmp2
-; CHECK:  paddb
-        store <8 x i8> %tmp3, <8 x i8>* null
-        ret void
-}
-
-; CHECK: ti16
-define void @ti16(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to <4 x i16>
-        %tmp2 = bitcast double %b to <4 x i16>
-        %tmp3 = add <4 x i16> %tmp1, %tmp2
-; CHECK:  paddw
-        store <4 x i16> %tmp3, <4 x i16>* null
-        ret void
-}
-
-; CHECK: ti32
-define void @ti32(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to <2 x i32>
-        %tmp2 = bitcast double %b to <2 x i32>
-        %tmp3 = add <2 x i32> %tmp1, %tmp2
-; CHECK:  paddd
-        store <2 x i32> %tmp3, <2 x i32>* null
-        ret void
-}
-
-; CHECK: ti64
-define void @ti64(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to <1 x i64>
-        %tmp2 = bitcast double %b to <1 x i64>
-        %tmp3 = add <1 x i64> %tmp1, %tmp2
-; CHECK:  addq
-        store <1 x i64> %tmp3, <1 x i64>* null
-        ret void
-}
-
-; MMX intrinsics calls get us MMX instructions.
-; CHECK: ti8a
-define void @ti8a(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
-        %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
-        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
-        store x86_mmx %tmp3, x86_mmx* null
-        ret void
-}
-
-; CHECK: ti16a
-define void @ti16a(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
-        %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
-        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
-        store x86_mmx %tmp3, x86_mmx* null
-        ret void
-}
-
-; CHECK: ti32a
-define void @ti32a(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
-        %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
-        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
-        store x86_mmx %tmp3, x86_mmx* null
-        ret void
-}
-
-; CHECK: ti64a
-define void @ti64a(double %a, double %b) nounwind {
-entry:
-        %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
-        %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
-        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
-        store x86_mmx %tmp3, x86_mmx* null
-        ret void
-}
- 
-declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
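
These tests are folded into mmx-arith.ll below, checked with the X64 prefix
only since scalar double arguments arrive in XMM registers there. The
movdq2q checks capture the interesting part: bitcasting a double to x86_mmx
forces the value out of its XMM register and into an MMX register before the
padd intrinsic can execute. Schematically (illustrative register choice):

    movdq2q %xmm0, %mm0   # low quadword of %xmm0 -> %mm0
    movdq2q %xmm1, %mm1
    paddb   %mm1, %mm0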

Modified: llvm/trunk/test/CodeGen/X86/mmx-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-arith.ll?rev=227130&r1=227129&r2=227130&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-arith.ll Mon Jan 26 14:06:51 2015
@@ -1,309 +1,308 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s
 
 ;; A basic sanity check to make sure that MMX arithmetic actually compiles.
 ;; First is a straight translation of the original with bitcasts as needed.
 
-define void @foo(x86_mmx* %A, x86_mmx* %B) {
+; X32-LABEL: test0
+; X64-LABEL: test0
+define void @test0(x86_mmx* %A, x86_mmx* %B) {
 entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
-        %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
-	%tmp4 = add <8 x i8> %tmp1a, %tmp3a		; <<8 x i8>> [#uses=2]
-        %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
-	store x86_mmx %tmp4a, x86_mmx* %A
-	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp12, x86_mmx* %A
-	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp21, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
-        %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
-	%tmp28 = sub <8 x i8> %tmp21a, %tmp27a		; <<8 x i8>> [#uses=2]
-        %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
-	store x86_mmx %tmp28a, x86_mmx* %A
-	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp36, x86_mmx* %A
-	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp45, x86_mmx* %A
-	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
-        %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
-	%tmp52 = mul <8 x i8> %tmp45a, %tmp51a		; <<8 x i8>> [#uses=2]
-        %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
-	store x86_mmx %tmp52a, x86_mmx* %A
-	%tmp57 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
-	%tmp58 = and <8 x i8> %tmp52, %tmp57a		; <<8 x i8>> [#uses=2]
-        %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
-	store x86_mmx %tmp58a, x86_mmx* %A
-	%tmp63 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
-	%tmp64 = or <8 x i8> %tmp58, %tmp63a		; <<8 x i8>> [#uses=2]
-        %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
-	store x86_mmx %tmp64a, x86_mmx* %A
-	%tmp69 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
-        %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
-	%tmp70 = xor <8 x i8> %tmp64b, %tmp69a		; <<8 x i8>> [#uses=1]
-        %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
-	store x86_mmx %tmp70a, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @baz(x86_mmx* %A, x86_mmx* %B) {
-entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
-        %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
-	%tmp4 = add <2 x i32> %tmp1a, %tmp3a		; <<2 x i32>> [#uses=2]
-        %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
-	store x86_mmx %tmp4a, x86_mmx* %A
-	%tmp9 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
-	%tmp10 = sub <2 x i32> %tmp4, %tmp9a		; <<2 x i32>> [#uses=2]
-        %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
-	store x86_mmx %tmp10a, x86_mmx* %A
-	%tmp15 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
-        %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
-	%tmp16 = mul <2 x i32> %tmp10b, %tmp15a		; <<2 x i32>> [#uses=2]
-        %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
-	store x86_mmx %tmp16a, x86_mmx* %A
-	%tmp21 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
-        %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
-	%tmp22 = and <2 x i32> %tmp16b, %tmp21a		; <<2 x i32>> [#uses=2]
-        %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
-	store x86_mmx %tmp22a, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
-        %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
-	%tmp28 = or <2 x i32> %tmp22b, %tmp27a		; <<2 x i32>> [#uses=2]
-        %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
-	store x86_mmx %tmp28a, x86_mmx* %A
-	%tmp33 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
-        %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
-	%tmp34 = xor <2 x i32> %tmp28b, %tmp33a		; <<2 x i32>> [#uses=1]
-        %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
-	store x86_mmx %tmp34a, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @bar(x86_mmx* %A, x86_mmx* %B) {
-entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
-        %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
-	%tmp4 = add <4 x i16> %tmp1a, %tmp3a		; <<4 x i16>> [#uses=2]
-        %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
-	store x86_mmx %tmp4a, x86_mmx* %A
-	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp12, x86_mmx* %A
-	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp21, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
-        %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
-	%tmp28 = sub <4 x i16> %tmp21a, %tmp27a		; <<4 x i16>> [#uses=2]
-        %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
-	store x86_mmx %tmp28a, x86_mmx* %A
-	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp36, x86_mmx* %A
-	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp45, x86_mmx* %A
-	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
-        %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
-	%tmp52 = mul <4 x i16> %tmp45a, %tmp51a		; <<4 x i16>> [#uses=2]
-        %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
-	store x86_mmx %tmp52a, x86_mmx* %A
-	%tmp55 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp60, x86_mmx* %A
-	%tmp64 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 )		; <x86_mmx> [#uses=1]
-	%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp70, x86_mmx* %A
-	%tmp75 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
-        %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
-	%tmp76 = and <4 x i16> %tmp70a, %tmp75a		; <<4 x i16>> [#uses=2]
-        %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
-	store x86_mmx %tmp76a, x86_mmx* %A
-	%tmp81 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
-        %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
-	%tmp82 = or <4 x i16> %tmp76b, %tmp81a		; <<4 x i16>> [#uses=2]
-        %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
-	store x86_mmx %tmp82a, x86_mmx* %A
-	%tmp87 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
-        %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
-	%tmp88 = xor <4 x i16> %tmp82b, %tmp87a		; <<4 x i16>> [#uses=1]
-        %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
-	store x86_mmx %tmp88a, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-;; The following is modified to use MMX intrinsics everywhere they work.
-
-define void @fooa(x86_mmx* %A, x86_mmx* %B) {
-entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp4, x86_mmx* %A
-	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp12, x86_mmx* %A
-	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp21, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp28, x86_mmx* %A
-	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp36, x86_mmx* %A
-	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp45, x86_mmx* %A
-	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp51a = bitcast x86_mmx %tmp51 to i64
-        %tmp51aa = bitcast i64 %tmp51a to <8 x i8>
-        %tmp51b = bitcast x86_mmx %tmp45 to <8 x i8>
-	%tmp52 = mul <8 x i8> %tmp51b, %tmp51aa		; <x86_mmx> [#uses=2]
-        %tmp52a = bitcast <8 x i8> %tmp52 to i64
-        %tmp52aa = bitcast i64 %tmp52a to x86_mmx
-	store x86_mmx %tmp52aa, x86_mmx* %A
-	%tmp57 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp58, x86_mmx* %A
-	%tmp63 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 )		; <x86_mmx> [#uses=2]	
-	store x86_mmx %tmp64, x86_mmx* %A
-	%tmp69 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp70, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @baza(x86_mmx* %A, x86_mmx* %B) {
-entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp4, x86_mmx* %A
-	%tmp9 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp10, x86_mmx* %A
-	%tmp15 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-        %tmp10a = bitcast x86_mmx %tmp10 to <2 x i32>
-        %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
-	%tmp16 = mul <2 x i32> %tmp10a, %tmp15a		; <x86_mmx> [#uses=2]
-        %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
-	store x86_mmx %tmp16a, x86_mmx* %A
-	%tmp21 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp22, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp28, x86_mmx* %A
-	%tmp33 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp34, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @bara(x86_mmx* %A, x86_mmx* %B) {
-entry:
-	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
-	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp4, x86_mmx* %A
-	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp12, x86_mmx* %A
-	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp21, x86_mmx* %A
-	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp28, x86_mmx* %A
-	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp36, x86_mmx* %A
-	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp45, x86_mmx* %A
-	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp52, x86_mmx* %A
-	%tmp55 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp60, x86_mmx* %A
-	%tmp64 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 )		; <x86_mmx> [#uses=1]
-	%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp70, x86_mmx* %A
-	%tmp75 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp76, x86_mmx* %A
-	%tmp81 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp82, x86_mmx* %A
-	%tmp87 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
-	%tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 )		; <x86_mmx> [#uses=2]
-	store x86_mmx %tmp88, x86_mmx* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
+  %tmp1 = load x86_mmx* %A
+  %tmp3 = load x86_mmx* %B
+  %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
+  %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
+  %tmp4 = add <8 x i8> %tmp1a, %tmp3a
+  %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
+  store x86_mmx %tmp4a, x86_mmx* %A
+  %tmp7 = load x86_mmx* %B
+  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
+  store x86_mmx %tmp12, x86_mmx* %A
+  %tmp16 = load x86_mmx* %B
+  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
+  store x86_mmx %tmp21, x86_mmx* %A
+  %tmp27 = load x86_mmx* %B
+  %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
+  %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
+  %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
+  %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
+  store x86_mmx %tmp28a, x86_mmx* %A
+  %tmp31 = load x86_mmx* %B
+  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
+  store x86_mmx %tmp36, x86_mmx* %A
+  %tmp40 = load x86_mmx* %B
+  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
+  store x86_mmx %tmp45, x86_mmx* %A
+  %tmp51 = load x86_mmx* %B
+  %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
+  %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
+  %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
+  %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
+  store x86_mmx %tmp52a, x86_mmx* %A
+  %tmp57 = load x86_mmx* %B
+  %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
+  %tmp58 = and <8 x i8> %tmp52, %tmp57a
+  %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
+  store x86_mmx %tmp58a, x86_mmx* %A
+  %tmp63 = load x86_mmx* %B
+  %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
+  %tmp64 = or <8 x i8> %tmp58, %tmp63a
+  %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
+  store x86_mmx %tmp64a, x86_mmx* %A
+  %tmp69 = load x86_mmx* %B
+  %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
+  %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
+  %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
+  %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
+  store x86_mmx %tmp70a, x86_mmx* %A
+  tail call void @llvm.x86.mmx.emms()
+  ret void
 }
 
-declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+; X32-LABEL: test1
+; X64-LABEL: test1
+define void @test1(x86_mmx* %A, x86_mmx* %B) {
+entry:
+  %tmp1 = load x86_mmx* %A
+  %tmp3 = load x86_mmx* %B
+  %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
+  %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
+  %tmp4 = add <2 x i32> %tmp1a, %tmp3a
+  %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
+  store x86_mmx %tmp4a, x86_mmx* %A
+  %tmp9 = load x86_mmx* %B
+  %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
+  %tmp10 = sub <2 x i32> %tmp4, %tmp9a
+  %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
+  store x86_mmx %tmp10a, x86_mmx* %A
+  %tmp15 = load x86_mmx* %B
+  %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
+  %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+  %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
+  %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+  store x86_mmx %tmp16a, x86_mmx* %A
+  %tmp21 = load x86_mmx* %B
+  %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
+  %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
+  %tmp22 = and <2 x i32> %tmp16b, %tmp21a
+  %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
+  store x86_mmx %tmp22a, x86_mmx* %A
+  %tmp27 = load x86_mmx* %B
+  %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
+  %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
+  %tmp28 = or <2 x i32> %tmp22b, %tmp27a
+  %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
+  store x86_mmx %tmp28a, x86_mmx* %A
+  %tmp33 = load x86_mmx* %B
+  %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
+  %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
+  %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
+  %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
+  store x86_mmx %tmp34a, x86_mmx* %A
+  tail call void @llvm.x86.mmx.emms()
+  ret void
+}
 
-declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
+; X32-LABEL: test2
+; X64-LABEL: test2
+define void @test2(x86_mmx* %A, x86_mmx* %B) {
+entry:
+  %tmp1 = load x86_mmx* %A
+  %tmp3 = load x86_mmx* %B
+  %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
+  %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
+  %tmp4 = add <4 x i16> %tmp1a, %tmp3a
+  %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
+  store x86_mmx %tmp4a, x86_mmx* %A
+  %tmp7 = load x86_mmx* %B
+  %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
+  store x86_mmx %tmp12, x86_mmx* %A
+  %tmp16 = load x86_mmx* %B
+  %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
+  store x86_mmx %tmp21, x86_mmx* %A
+  %tmp27 = load x86_mmx* %B
+  %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
+  %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
+  %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
+  %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
+  store x86_mmx %tmp28a, x86_mmx* %A
+  %tmp31 = load x86_mmx* %B
+  %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
+  store x86_mmx %tmp36, x86_mmx* %A
+  %tmp40 = load x86_mmx* %B
+  %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
+  store x86_mmx %tmp45, x86_mmx* %A
+  %tmp51 = load x86_mmx* %B
+  %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
+  %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
+  %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
+  %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
+  store x86_mmx %tmp52a, x86_mmx* %A
+  %tmp55 = load x86_mmx* %B
+  %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
+  store x86_mmx %tmp60, x86_mmx* %A
+  %tmp64 = load x86_mmx* %B
+  %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
+  %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
+  store x86_mmx %tmp70, x86_mmx* %A
+  %tmp75 = load x86_mmx* %B
+  %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
+  %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
+  %tmp76 = and <4 x i16> %tmp70a, %tmp75a
+  %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
+  store x86_mmx %tmp76a, x86_mmx* %A
+  %tmp81 = load x86_mmx* %B
+  %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
+  %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
+  %tmp82 = or <4 x i16> %tmp76b, %tmp81a
+  %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
+  store x86_mmx %tmp82a, x86_mmx* %A
+  %tmp87 = load x86_mmx* %B
+  %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
+  %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
+  %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
+  %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
+  store x86_mmx %tmp88a, x86_mmx* %A
+  tail call void @llvm.x86.mmx.emms()
+  ret void
+}
 
-declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+; X32-LABEL: test3
+define <1 x i64> @test3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
+entry:
+  %tmp2942 = icmp eq i32 %count, 0
+  br i1 %tmp2942, label %bb31, label %bb26
 
-declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
+bb26:
+; X32:  addl
+; X32:  adcl
+  %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
+  %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
+  %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0
+  %tmp14 = load <1 x i64>* %tmp13
+  %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0
+  %tmp19 = load <1 x i64>* %tmp18
+  %tmp21 = add <1 x i64> %tmp19, %tmp14
+  %tmp22 = add <1 x i64> %tmp21, %sum.035.0
+  %tmp25 = add i32 %i.037.0, 1
+  %tmp29 = icmp ult i32 %tmp25, %count
+  br i1 %tmp29, label %bb26, label %bb31
+
+bb31:
+  %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
+  ret <1 x i64> %sum.035.1
+}
 
-declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
+; There are no MMX operations here, so we use XMM or i64.
+; X64-LABEL: ti8
+define void @ti8(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to <8 x i8>
+  %tmp2 = bitcast double %b to <8 x i8>
+  %tmp3 = add <8 x i8> %tmp1, %tmp2
+; X64:  paddb
+  store <8 x i8> %tmp3, <8 x i8>* null
+  ret void
+}
 
-declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
+; X64-LABEL: ti16
+define void @ti16(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to <4 x i16>
+  %tmp2 = bitcast double %b to <4 x i16>
+  %tmp3 = add <4 x i16> %tmp1, %tmp2
+; X64:  paddw
+  store <4 x i16> %tmp3, <4 x i16>* null
+  ret void
+}
 
-declare void @llvm.x86.mmx.emms()
+; X64-LABEL: ti32
+define void @ti32(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to <2 x i32>
+  %tmp2 = bitcast double %b to <2 x i32>
+  %tmp3 = add <2 x i32> %tmp1, %tmp2
+; X64:  paddd
+  store <2 x i32> %tmp3, <2 x i32>* null
+  ret void
+}
+
+; X64-LABEL: ti64
+define void @ti64(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to <1 x i64>
+  %tmp2 = bitcast double %b to <1 x i64>
+  %tmp3 = add <1 x i64> %tmp1, %tmp2
+; X64:  addq
+  store <1 x i64> %tmp3, <1 x i64>* null
+  ret void
+}
+
+; MMX intrinsics calls get us MMX instructions.
+; X64-LABEL: ti8a
+define void @ti8a(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+  %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
+  store x86_mmx %tmp3, x86_mmx* null
+  ret void
+}
+
+; X64-LABEL: ti16a
+define void @ti16a(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+  %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
+  store x86_mmx %tmp3, x86_mmx* null
+  ret void
+}
+
+; X64-LABEL: ti32a
+define void @ti32a(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+  %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
+  store x86_mmx %tmp3, x86_mmx* null
+  ret void
+}
+
+; X64-LABEL: ti64a
+define void @ti64a(double %a, double %b) nounwind {
+entry:
+  %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+  %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
+  store x86_mmx %tmp3, x86_mmx* null
+  ret void
+}
 
 declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
 declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
 declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
+
+declare void @llvm.x86.mmx.emms()
+
 declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
 declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx)
 declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
 declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx)
 

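Every test in mmx-builtins.ll (and in its renamed successor,
mmx-intrinsics.ll) follows the same wrapper shape: bitcast the <1 x i64>
arguments to the element vector type, then to x86_mmx, call the intrinsic,
and bitcast back to return an i64. A minimal, hypothetical instance of the
pattern:

    define i64 @template(<1 x i64> %a, <1 x i64> %b) nounwind readnone {
    entry:
      %a4 = bitcast <1 x i64> %a to <4 x i16>   ; reinterpret as elements
      %b4 = bitcast <1 x i64> %b to <4 x i16>
      %am = bitcast <4 x i16> %a4 to x86_mmx    ; enter the MMX domain
      %bm = bitcast <4 x i16> %b4 to x86_mmx
      %r = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %am, x86_mmx %bm)
      %r4 = bitcast x86_mmx %r to <4 x i16>
      %rv = bitcast <4 x i16> %r4 to <1 x i64>  ; leave the MMX domain
      %res = extractelement <1 x i64> %rv, i32 0
      ret i64 %res
    }
    declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) nounwind readnone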
Removed: llvm/trunk/test/CodeGen/X86/mmx-builtins.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-builtins.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-builtins.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-builtins.ll (removed)
@@ -1,1349 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+ssse3,-avx | FileCheck %s
-; RUN: llc < %s -march=x86 -mattr=+avx | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+ssse3,-avx | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
-
-declare x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test1(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phaddw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpgt.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test88(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpgtd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpgt.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpgt.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test87(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpgtw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpgt.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpgt.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test86(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpgtb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpgt.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpeq.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test85(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpeqd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpeq.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpeq.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test84(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpeqw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpeq.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pcmpeq.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test83(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pcmpeqb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pcmpeq.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckldq(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test82(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpckldq
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpckldq(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpcklwd(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test81(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpcklwd
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpcklwd(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test80(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpcklbw
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test79(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpckhdq
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckhwd(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test78(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpckhwd
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpckhwd(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckhbw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test77(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: punpckhbw
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.punpckhbw(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.packuswb(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test76(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: packuswb
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.packuswb(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.packssdw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test75(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: packssdw
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.packssdw(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.packsswb(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test74(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: packsswb
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.packsswb(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32) nounwind readnone
-
-define i64 @test73(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psrad
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <2 x i32>
-  %3 = bitcast <2 x i32> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32) nounwind readnone
-
-define i64 @test72(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psraw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <4 x i16>
-  %3 = bitcast <4 x i16> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
-
-define i64 @test71(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psrlq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var.i = bitcast i64 %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to i64
-  ret i64 %2
-}
-
-declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32) nounwind readnone
-
-define i64 @test70(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psrld
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <2 x i32>
-  %3 = bitcast <2 x i32> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone
-
-define i64 @test69(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psrlw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <4 x i16>
-  %3 = bitcast <4 x i16> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
-
-define i64 @test68(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psllq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var.i = bitcast i64 %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to i64
-  ret i64 %2
-}
-
-declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32) nounwind readnone
-
-define i64 @test67(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pslld
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <2 x i32>
-  %3 = bitcast <2 x i32> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32) nounwind readnone
-
-define i64 @test66(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: psllw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %mmx_var.i, i32 3) nounwind
-  %2 = bitcast x86_mmx %1 to <4 x i16>
-  %3 = bitcast <4 x i16> %2 to <1 x i64>
-  %4 = extractelement <1 x i64> %3, i32 0
-  ret i64 %4
-}
-
-declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test65(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psrad
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psra.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psra.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test64(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psraw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psra.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test63(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psrlq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var.i = bitcast i64 %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test62(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psrld
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test61(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psrlw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psll.q(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test60(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psllq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var.i = bitcast i64 %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psll.q(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.psll.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test59(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pslld
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psll.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psll.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test58(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psllw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1.i = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test56(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pxor
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pxor(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test55(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: por
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.por(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pandn(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test54(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pandn
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pandn(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test53(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pand
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pand(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test52(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmullw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-define i64 @test51(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmullw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test50(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmulhw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test49(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmaddwd
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test48(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubusw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test47(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubusb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test46(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test45(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubsb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-define i64 @test44(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var = bitcast i64 %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1 = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psub.q(x86_mmx %mmx_var, x86_mmx %mmx_var1)
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.psub.q(x86_mmx, x86_mmx) nounwind readnone
-
-declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test43(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psub.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test42(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psub.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test41(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psubb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psub.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test40(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddusw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test39(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddusb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test38(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test37(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddsb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test36(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddq
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var = bitcast i64 %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1 = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %mmx_var, x86_mmx %mmx_var1)
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test35(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test34(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test33(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: paddb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.psad.bw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test32(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psadbw
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.psad.bw(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.pmins.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test31(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pminsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmins.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pminu.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test30(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pminub
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pminu.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmaxs.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test29(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmaxsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmaxs.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmaxu.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test28(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmaxub
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmaxu.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pavg.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test27(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pavgw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pavg.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pavg.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test26(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pavgb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pavg.b(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare void @llvm.x86.mmx.movnt.dq(x86_mmx*, x86_mmx) nounwind
-
-define void @test25(<1 x i64>* %p, <1 x i64> %a) nounwind optsize ssp {
-; CHECK: movntq
-entry:
-  %mmx_ptr_var.i = bitcast <1 x i64>* %p to x86_mmx*
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var.i = bitcast i64 %0 to x86_mmx
-  tail call void @llvm.x86.mmx.movnt.dq(x86_mmx* %mmx_ptr_var.i, x86_mmx %mmx_var.i) nounwind
-  ret void
-}
-
-declare i32 @llvm.x86.mmx.pmovmskb(x86_mmx) nounwind readnone
-
-define i32 @test24(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pmovmskb
-entry:
-  %0 = bitcast <1 x i64> %a to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %0 to x86_mmx
-  %1 = tail call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %mmx_var.i) nounwind
-  ret i32 %1
-}
-
-declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) nounwind
-
-define void @test23(<1 x i64> %d, <1 x i64> %n, i8* %p) nounwind optsize ssp {
-; CHECK: maskmovq
-entry:
-  %0 = bitcast <1 x i64> %n to <8 x i8>
-  %1 = bitcast <1 x i64> %d to <8 x i8>
-  %mmx_var.i = bitcast <8 x i8> %1 to x86_mmx
-  %mmx_var1.i = bitcast <8 x i8> %0 to x86_mmx
-  tail call void @llvm.x86.mmx.maskmovq(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i, i8* %p) nounwind
-  ret void
-}
-
-declare x86_mmx @llvm.x86.mmx.pmulhu.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test22(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmulhuw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %mmx_var.i = bitcast <4 x i16> %1 to x86_mmx
-  %mmx_var1.i = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmulhu.w(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8) nounwind readnone
-
-define i64 @test21(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pshufw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %1 = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 3) nounwind readnone
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-define i32 @test21_2(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: test21_2
-; CHECK: pshufw
-; CHECK: movd
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %1 = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 3) nounwind readnone
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <2 x i32>
-  %5 = extractelement <2 x i32> %4, i32 0
-  ret i32 %5
-}
-
-declare x86_mmx @llvm.x86.mmx.pmulu.dq(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test20(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmuludq
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %mmx_var.i = bitcast <2 x i32> %1 to x86_mmx
-  %mmx_var1.i = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.pmulu.dq(x86_mmx %mmx_var.i, x86_mmx %mmx_var1.i) nounwind
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx) nounwind readnone
-
-define <2 x double> @test19(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: cvtpi2pd
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %1 = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %1) nounwind readnone
-  ret <2 x double> %2
-}
-
-declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>) nounwind readnone
-
-define i64 @test18(<2 x double> %a) nounwind readnone optsize ssp {
-; CHECK: cvttpd2pi
-entry:
-  %0 = tail call x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double> %a) nounwind readnone
-  %1 = bitcast x86_mmx %0 to <2 x i32>
-  %2 = bitcast <2 x i32> %1 to <1 x i64>
-  %3 = extractelement <1 x i64> %2, i32 0
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.sse.cvtpd2pi(<2 x double>) nounwind readnone
-
-define i64 @test17(<2 x double> %a) nounwind readnone optsize ssp {
-; CHECK: cvtpd2pi
-entry:
-  %0 = tail call x86_mmx @llvm.x86.sse.cvtpd2pi(<2 x double> %a) nounwind readnone
-  %1 = bitcast x86_mmx %0 to <2 x i32>
-  %2 = bitcast <2 x i32> %1 to <1 x i64>
-  %3 = extractelement <1 x i64> %2, i32 0
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.mmx.palignr.b(x86_mmx, x86_mmx, i8) nounwind readnone
-
-define i64 @test16(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: palignr
-entry:
-  %0 = extractelement <1 x i64> %a, i32 0
-  %mmx_var = bitcast i64 %0 to x86_mmx
-  %1 = extractelement <1 x i64> %b, i32 0
-  %mmx_var1 = bitcast i64 %1 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.mmx.palignr.b(x86_mmx %mmx_var, x86_mmx %mmx_var1, i8 16)
-  %3 = bitcast x86_mmx %2 to i64
-  ret i64 %3
-}
-
-declare x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx) nounwind readnone
-
-define i64 @test15(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pabsd
-entry:
-  %0 = bitcast <1 x i64> %a to <2 x i32>
-  %1 = bitcast <2 x i32> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx %1) nounwind readnone
-  %3 = bitcast x86_mmx %2 to <2 x i32>
-  %4 = bitcast <2 x i32> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx) nounwind readnone
-
-define i64 @test14(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pabsw
-entry:
-  %0 = bitcast <1 x i64> %a to <4 x i16>
-  %1 = bitcast <4 x i16> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx %1) nounwind readnone
-  %3 = bitcast x86_mmx %2 to <4 x i16>
-  %4 = bitcast <4 x i16> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx) nounwind readnone
-
-define i64 @test13(<1 x i64> %a) nounwind readnone optsize ssp {
-; CHECK: pabsb
-entry:
-  %0 = bitcast <1 x i64> %a to <8 x i8>
-  %1 = bitcast <8 x i8> %0 to x86_mmx
-  %2 = tail call x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx %1) nounwind readnone
-  %3 = bitcast x86_mmx %2 to <8 x i8>
-  %4 = bitcast <8 x i8> %3 to <1 x i64>
-  %5 = extractelement <1 x i64> %4, i32 0
-  ret i64 %5
-}
-
-declare x86_mmx @llvm.x86.ssse3.psign.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test12(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psignd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %2 = bitcast <2 x i32> %1 to x86_mmx
-  %3 = bitcast <2 x i32> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.psign.d(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <2 x i32>
-  %6 = bitcast <2 x i32> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.psign.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test11(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psignw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.psign.w(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.psign.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test10(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: psignb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %2 = bitcast <8 x i8> %1 to x86_mmx
-  %3 = bitcast <8 x i8> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.psign.b(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <8 x i8>
-  %6 = bitcast <8 x i8> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.pshuf.b(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test9(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pshufb
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %2 = bitcast <8 x i8> %1 to x86_mmx
-  %3 = bitcast <8 x i8> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.pshuf.b(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <8 x i8>
-  %6 = bitcast <8 x i8> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.pmul.hr.sw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test8(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmulhrsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.pmul.hr.sw(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.pmadd.ub.sw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test7(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: pmaddubsw
-entry:
-  %0 = bitcast <1 x i64> %b to <8 x i8>
-  %1 = bitcast <1 x i64> %a to <8 x i8>
-  %2 = bitcast <8 x i8> %1 to x86_mmx
-  %3 = bitcast <8 x i8> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.pmadd.ub.sw(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <8 x i8>
-  %6 = bitcast <8 x i8> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.phsub.sw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test6(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phsubsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phsub.sw(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.phsub.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test5(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phsubd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %2 = bitcast <2 x i32> %1 to x86_mmx
-  %3 = bitcast <2 x i32> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phsub.d(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <2 x i32>
-  %6 = bitcast <2 x i32> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.phsub.w(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test4(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phsubw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phsub.w(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.phadd.sw(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test3(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phaddsw
-entry:
-  %0 = bitcast <1 x i64> %b to <4 x i16>
-  %1 = bitcast <1 x i64> %a to <4 x i16>
-  %2 = bitcast <4 x i16> %1 to x86_mmx
-  %3 = bitcast <4 x i16> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phadd.sw(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <4 x i16>
-  %6 = bitcast <4 x i16> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-declare x86_mmx @llvm.x86.ssse3.phadd.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @test2(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp {
-; CHECK: phaddd
-entry:
-  %0 = bitcast <1 x i64> %b to <2 x i32>
-  %1 = bitcast <1 x i64> %a to <2 x i32>
-  %2 = bitcast <2 x i32> %1 to x86_mmx
-  %3 = bitcast <2 x i32> %0 to x86_mmx
-  %4 = tail call x86_mmx @llvm.x86.ssse3.phadd.d(x86_mmx %2, x86_mmx %3) nounwind readnone
-  %5 = bitcast x86_mmx %4 to <2 x i32>
-  %6 = bitcast <2 x i32> %5 to <1 x i64>
-  %7 = extractelement <1 x i64> %6, i32 0
-  ret i64 %7
-}
-
-define <4 x float> @test89(<4 x float> %a, x86_mmx %b) nounwind {
-; CHECK: cvtpi2ps
-  %c = tail call <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float> %a, x86_mmx %b)
-  ret <4 x float> %c
-}
-
-declare <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, x86_mmx) nounwind readnone
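
The binary-intrinsic tests in the diff above all share one shape: the <1 x i64> arguments are bitcast to the element type the instruction operates on, then to the opaque x86_mmx type (the only type the MMX intrinsics accept), and the result is bitcast back out so a plain i64 can be returned. A distilled sketch of that shape, using padd.b as in test33; the function name is illustrative and this is not itself part of the commit:

declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) nounwind readnone

define i64 @sketch_binop(<1 x i64> %a, <1 x i64> %b) nounwind readnone {
entry:
  ; Reinterpret the 64-bit payloads as the instruction's element type.
  %a8 = bitcast <1 x i64> %a to <8 x i8>
  %b8 = bitcast <1 x i64> %b to <8 x i8>
  ; The MMX intrinsics accept only the opaque x86_mmx type.
  %am = bitcast <8 x i8> %a8 to x86_mmx
  %bm = bitcast <8 x i8> %b8 to x86_mmx
  %r = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %am, x86_mmx %bm)
  ; Unwrap the result back to a plain i64 return value.
  %r8 = bitcast x86_mmx %r to <8 x i8>
  %rv = bitcast <8 x i8> %r8 to <1 x i64>
  %ri = extractelement <1 x i64> %rv, i32 0
  ret i64 %ri
}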

Copied: llvm/trunk/test/CodeGen/X86/mmx-intrinsics.ll (from r227121, llvm/trunk/test/CodeGen/X86/mmx-builtins.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-intrinsics.ll?p2=llvm/trunk/test/CodeGen/X86/mmx-intrinsics.ll&p1=llvm/trunk/test/CodeGen/X86/mmx-builtins.ll&r1=227121&r2=227130&rev=227130&view=diff
==============================================================================
    (empty)

Removed: llvm/trunk/test/CodeGen/X86/mmx-punpckhdq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-punpckhdq.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-punpckhdq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-punpckhdq.ll (removed)
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse4.2 -mtriple=x86_64-apple-darwin10 | FileCheck %s
-; There are no MMX operations in bork; promoted to XMM.
-
-define void @bork(<1 x i64>* %x) {
-; CHECK: bork
-; CHECK: movlpd
-entry:
-	%tmp2 = load <1 x i64>* %x		; <<1 x i64>> [#uses=1]
-	%tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>		; <<2 x i32>> [#uses=1]
-	%tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >		; <<2 x i32>> [#uses=1]
-	%tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>		; <<1 x i64>> [#uses=1]
-	store <1 x i64> %tmp10, <1 x i64>* %x
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-; pork uses MMX.
-
-define void @pork(x86_mmx* %x) {
-; CHECK: pork
-; CHECK: punpckhdq
-entry:
-	%tmp2 = load x86_mmx* %x		; <x86_mmx> [#uses=1]
-        %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
-	store x86_mmx %tmp9, x86_mmx* %x
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx)
-declare void @llvm.x86.mmx.emms()
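
The pork case above is the contract these tests exercise: only explicit x86_mmx values select MMX instructions, and such code ends with llvm.x86.mmx.emms because the MMX registers alias the x87 stack, which must be cleared before later FP code runs. A minimal sketch of that contract (illustrative name, not part of the commit):

declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()

define void @sketch_mmx_then_emms(x86_mmx* %p) {
entry:
  %v = load x86_mmx* %p
  ; Explicit x86_mmx operands force the MMX punpckhdq instruction.
  %hi = tail call x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx %v, x86_mmx %v)
  store x86_mmx %hi, x86_mmx* %p
  ; EMMS empties the MMX state so the x87 register stack is usable again.
  tail call void @llvm.x86.mmx.emms()
  ret void
}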

Removed: llvm/trunk/test/CodeGen/X86/mmx-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-shift.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-shift.ll (removed)
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | FileCheck %s
-
-define i64 @t1(<1 x i64> %mm1) nounwind  {
-entry:
-        %tmp = bitcast <1 x i64> %mm1 to x86_mmx
-	%tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 )		; <x86_mmx> [#uses=1]
-        %retval1112 = bitcast x86_mmx %tmp6 to i64
-	ret i64 %retval1112
-
-; CHECK-LABEL: t1:
-; CHECK: psllq $32
-}
-
-declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone 
-
-define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind  {
-entry:
-	%tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone 		; <x86_mmx> [#uses=1]
-        %retval1112 = bitcast x86_mmx %tmp7 to i64
-	ret i64 %retval1112
-
-; CHECK-LABEL: t2:
-; CHECK: psrad
-}
-
-declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone 
-
-define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind  {
-entry:
-	%tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone 		; <x86_mmx> [#uses=1]
-        %retval1314 = bitcast x86_mmx %tmp8 to i64
-	ret i64 %retval1314
-
-; CHECK-LABEL: t3:
-; CHECK: psrlw
-}
-
-declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone 
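
The three tests removed here cover the two count forms the MMX shift intrinsics come in: the pslli./psrli./psrai. variants take an immediate i32 count, while the psll./psrl./psra. variants take the count in an MMX register. A combined sketch of both forms (illustrative name, not part of the commit):

declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone

define i64 @sketch_shift_forms(x86_mmx %v, x86_mmx %count) nounwind {
entry:
  ; Immediate form: the i32 count becomes the $imm of psllq.
  %s1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %v, i32 32)
  ; Register form: the count itself lives in an MMX register (psrad).
  %s2 = tail call x86_mmx @llvm.x86.mmx.psra.d(x86_mmx %s1, x86_mmx %count)
  %r = bitcast x86_mmx %s2 to i64
  ret i64 %r
}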

Removed: llvm/trunk/test/CodeGen/X86/mmx-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-shuffle.ll?rev=227129&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-shuffle.ll (removed)
@@ -1,31 +0,0 @@
-; RUN: llc < %s -mcpu=yonah
-; PR1427
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-pc-linux-gnu"
-	%struct.DrawHelper = type { void (i32, %struct.QT_FT_Span*, i8*)*, void (i32, %struct.QT_FT_Span*, i8*)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i32, i32)* }
-	%struct.QBasicAtomic = type { i32 }
-	%struct.QClipData = type { i32, %"struct.QClipData::ClipLine"*, i32, i32, %struct.QT_FT_Span*, i32, i32, i32, i32 }
-	%"struct.QClipData::ClipLine" = type { i32, %struct.QT_FT_Span* }
-	%struct.QRasterBuffer = type { %struct.QRect, %struct.QRegion, %struct.QClipData*, %struct.QClipData*, i8, i32, i32, %struct.DrawHelper*, i32, i32, i32, i8* }
-	%struct.QRect = type { i32, i32, i32, i32 }
-	%struct.QRegion = type { %"struct.QRegion::QRegionData"* }
-	%"struct.QRegion::QRegionData" = type { %struct.QBasicAtomic, %struct._XRegion*, i8*, %struct.QRegionPrivate* }
-	%struct.QRegionPrivate = type opaque
-	%struct.QT_FT_Span = type { i16, i16, i16, i8 }
-	%struct._XRegion = type opaque
-
-define void @_Z19qt_bitmapblit16_sseP13QRasterBufferiijPKhiii(%struct.QRasterBuffer* %rasterBuffer, i32 %x, i32 %y, i32 %color, i8* %src, i32 %width, i32 %height, i32 %stride) {
-entry:
-	%tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>		; <<2 x i32>> [#uses=1]
-	%tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)		; <<2 x i32>> [#uses=1]
-	%tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>		; <<4 x i16>> [#uses=1]
-	%tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >		; <<4 x i16>> [#uses=1]
-	%tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>		; <<8 x i8>> [#uses=1]
-        %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
-        %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
-	tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null )
-	ret void
-}
-
-declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
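
One IR detail worth noting in the body that moves into vector-shuffle-mmx.ll below: the mask operand of the and is a constant expression, a <4 x i16> vector constant bitcast inline to <2 x i32>, so no instruction is needed to materialize the reinterpreted constant. A minimal sketch of the same construct (illustrative name, not part of the commit):

define <2 x i32> @sketch_constexpr_mask(<2 x i32> %v) {
entry:
  ; The second operand is a constexpr bitcast: the <4 x i16> constant is
  ; reinterpreted as <2 x i32> at compile time, with no runtime cast.
  %m = and <2 x i32> %v, bitcast (<4 x i16> <i16 -32640, i16 16448, i16 8224, i16 4112> to <2 x i32>)
  ret <2 x i32> %m
}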

Added: llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll?rev=227130&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll Mon Jan 26 14:06:51 2015
@@ -0,0 +1,40 @@
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s
+
+; If there is no explicit MMX type usage, always promote to XMM.
+
+define void @test0(<1 x i64>* %x) {
+; X32-LABEL: test0
+; X64-LABEL: test0
+; X32: pshufd $213
+; X64: pshufd $213
+; X32-NEXT: movlpd %xmm
+; X64-NEXT: movq %xmm
+entry:
+  %tmp2 = load <1 x i64>* %x
+  %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
+  %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
+  %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
+  store <1 x i64> %tmp10, <1 x i64>* %x
+  ret void
+}
+
+define void @test1() {
+; X32-LABEL: test1:
+; X32:    pshuflw
+; X32-NEXT:    pshufhw
+; X32-NEXT:    pshufd
+; X32:    maskmovq
+entry:
+  %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
+  %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
+  %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>
+  %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >
+  %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>
+  %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
+  %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
+  tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null)
+  ret void
+}
+
+declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
