[llvm] r333832 - [X86][SSE] Cleanup SSE2 intrinsics tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jun 2 12:43:14 PDT 2018


Author: rksimon
Date: Sat Jun  2 12:43:14 2018
New Revision: 333832

URL: http://llvm.org/viewvc/llvm-project?rev=333832&view=rev
Log:
[X86][SSE] Cleanup SSE2 intrinsics tests

Ensure we cover 32-bit and 64-bit targets for the SSE/AVX/AVX512 cases as necessary.
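
For reference, this is how the layered FileCheck prefixes in the new RUN lines compose: a directive under a shared prefix (e.g. CHECK) is matched by every RUN invocation that lists that prefix, so codegen common to all configurations is checked once, while the SSE/AVX/AVX1/AVX512 directives capture only the per-feature differences. A minimal illustrative sketch follows (not part of this commit; the function name is a hypothetical stand-in, with codegen mirroring test_mm_add_epi64 in the diff below):

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
  define <2 x i64> @example_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
  ; SSE-LABEL: example_add_epi64:
  ; SSE:         paddq %xmm1, %xmm0
  ;
  ; AVX-LABEL: example_add_epi64:
  ; AVX:         vpaddq %xmm1, %xmm0, %xmm0
  ;
  ; CHECK:       retq
    %res = add <2 x i64> %a0, %a1
    ret <2 x i64> %res
  }

Both RUN lines match the shared CHECK directive, while each matches only its own SSE or AVX directives; this is the pattern the diff below applies across the SSE2 intrinsics tests.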

Modified:
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse2.ll

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll Sat Jun  2 12:43:14 2018
@@ -1,62 +1,94 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
 
 define i64 @test_mm_cvtsd_si64(<2 x double> %a0) nounwind {
-; X64-LABEL: test_mm_cvtsd_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsd2si %xmm0, %rax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsd_si64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtsd2si %xmm0, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_cvtsd_si64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtsd2si %xmm0, %rax
+; AVX-NEXT:    retq
   %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
   ret i64 %res
 }
 declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
 
 define i64 @test_mm_cvtsi128_si64(<2 x i64> %a0) nounwind {
-; X64-LABEL: test_mm_cvtsi128_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %xmm0, %rax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsi128_si64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %xmm0, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_cvtsi128_si64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq %xmm0, %rax
+; AVX-NEXT:    retq
   %res = extractelement <2 x i64> %a0, i32 0
   ret i64 %res
 }
 
 define <2 x double> @test_mm_cvtsi64_sd(<2 x double> %a0, i64 %a1) nounwind {
-; X64-LABEL: test_mm_cvtsi64_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsi2sdq %rdi, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsi64_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_cvtsi64_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
 
 define <2 x i64> @test_mm_cvtsi64_si128(i64 %a0) nounwind {
-; X64-LABEL: test_mm_cvtsi64_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsi64_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_cvtsi64_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq %rdi, %xmm0
+; AVX-NEXT:    retq
   %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0
   %res1 = insertelement <2 x i64> %res0, i64 0, i32 1
   ret <2 x i64> %res1
 }
 
 define i64 @test_mm_cvttsd_si64(<2 x double> %a0) nounwind {
-; X64-LABEL: test_mm_cvttsd_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttsd2si %xmm0, %rax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvttsd_si64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_cvttsd_si64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX-NEXT:    retq
   %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
   ret i64 %res
 }
 declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
-; X64-LABEL: test_mm_loadu_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_loadu_si64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_mm_loadu_si64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    retq
   %ld = load i64, i64* %a0, align 1
   %res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
   %res1 = insertelement <2 x i64> %res0, i64 0, i32 1
@@ -64,10 +96,10 @@ define <2 x i64> @test_mm_loadu_si64(i64
 }
 
 define void @test_mm_stream_si64(i64 *%a0, i64 %a1) {
-; X64-LABEL: test_mm_stream_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    movntiq %rsi, (%rdi)
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_stream_si64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movntiq %rsi, (%rdi)
+; CHECK-NEXT:    retq
   store i64 %a1, i64* %a0, align 1, !nontemporal !0
   ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll Sat Jun  2 12:43:14 2018
@@ -1,19 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
 
 define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_add_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    paddb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    paddb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = add <16 x i8> %arg0, %arg1
@@ -22,15 +26,15 @@ define <2 x i64> @test_mm_add_epi8(<2 x
 }
 
 define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_add_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    paddw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    paddw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = add <8 x i16> %arg0, %arg1
@@ -39,15 +43,15 @@ define <2 x i64> @test_mm_add_epi16(<2 x
 }
 
 define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_add_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    paddd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    paddd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = add <4 x i32> %arg0, %arg1
@@ -56,43 +60,43 @@ define <2 x i64> @test_mm_add_epi32(<2 x
 }
 
 define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_add_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    paddq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    paddq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = add <2 x i64> %a0, %a1
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_add_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    addpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    addpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = fadd <2 x double> %a0, %a1
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_add_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    addsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_add_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    addsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_add_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_add_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext0 = extractelement <2 x double> %a0, i32 0
   %ext1 = extractelement <2 x double> %a1, i32 0
   %fadd = fadd double %ext0, %ext1
@@ -101,15 +105,15 @@ define <2 x double> @test_mm_add_sd(<2 x
 }
 
 define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_adds_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    paddsb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_adds_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    paddsb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_adds_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_adds_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -119,15 +123,15 @@ define <2 x i64> @test_mm_adds_epi8(<2 x
 declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_adds_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    paddsw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_adds_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    paddsw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_adds_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_adds_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -137,15 +141,15 @@ define <2 x i64> @test_mm_adds_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_adds_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    paddusb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_adds_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    paddusb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_adds_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_adds_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -155,15 +159,15 @@ define <2 x i64> @test_mm_adds_epu8(<2 x
 declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_adds_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    paddusw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_adds_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    paddusw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_adds_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_adds_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -173,15 +177,15 @@ define <2 x i64> @test_mm_adds_epu16(<2
 declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_and_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    andps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_and_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    andps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_and_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_and_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
   %arg1 = bitcast <2 x double> %a1 to <4 x i32>
   %res = and <4 x i32> %arg0, %arg1
@@ -190,29 +194,29 @@ define <2 x double> @test_mm_and_pd(<2 x
 }
 
 define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_and_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    andps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_and_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    andps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_and_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_and_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = and <2 x i64> %a0, %a1
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_andnot_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    andnps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_andnot_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    andnps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_andnot_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andnps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_andnot_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandnps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
   %arg1 = bitcast <2 x double> %a1 to <4 x i32>
   %not = xor <4 x i32> %arg0, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -222,34 +226,52 @@ define <2 x double> @test_mm_andnot_pd(<
 }
 
 define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_andnot_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqd %xmm2, %xmm2
-; X32-NEXT:    pxor %xmm2, %xmm0
-; X32-NEXT:    pand %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_andnot_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqd %xmm2, %xmm2
-; X64-NEXT:    pxor %xmm2, %xmm0
-; X64-NEXT:    pand %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_andnot_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pxor %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_andnot_si128:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_andnot_si128:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %not = xor <2 x i64> %a0, <i64 -1, i64 -1>
   %res = and <2 x i64> %not, %a1
   ret <2 x i64> %res
 }
 
 define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_avg_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    pavgb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_avg_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    pavgb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_avg_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pavgb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_avg_epu8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_avg_epu8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %zext0 = zext <16 x i8> %arg0 to <16 x i16>
@@ -263,15 +285,27 @@ define <2 x i64> @test_mm_avg_epu8(<2 x
 }
 
 define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_avg_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    pavgw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_avg_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    pavgw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_avg_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pavgw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_avg_epu16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_avg_epu16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %zext0 = zext <8 x i16> %arg0 to <8 x i32>
@@ -285,15 +319,15 @@ define <2 x i64> @test_mm_avg_epu16(<2 x
 }
 
 define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_bslli_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_bslli_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_bslli_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_bslli_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
   %bc = bitcast <16 x i8> %res to <2 x i64>
@@ -301,15 +335,15 @@ define <2 x i64> @test_mm_bslli_si128(<2
 }
 
 define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_bsrli_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_bsrli_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_bsrli_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_bsrli_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
   %bc = bitcast <16 x i8> %res to <2 x i64>
@@ -317,83 +351,59 @@ define <2 x i64> @test_mm_bsrli_si128(<2
 }
 
 define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_castpd_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castpd_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castpd_ps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <2 x double> %a0 to <4 x float>
   ret <4 x float> %res
 }
 
 define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_castpd_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castpd_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castpd_si128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <2 x double> %a0 to <2 x i64>
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
-; X32-LABEL: test_mm_castps_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castps_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castps_pd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <4 x float> %a0 to <2 x double>
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
-; X32-LABEL: test_mm_castps_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castps_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castps_si128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <4 x float> %a0 to <2 x i64>
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_castsi128_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castsi128_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castsi128_pd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <2 x i64> %a0 to <2 x double>
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_castsi128_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_castsi128_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_castsi128_ps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = bitcast <2 x i64> %a0 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @test_mm_clflush(i8* %a0) nounwind {
-; X32-LABEL: test_mm_clflush:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    clflush (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_clflush:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    clflush (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_clflush:
 ; X64:       # %bb.0:
@@ -405,15 +415,21 @@ define void @test_mm_clflush(i8* %a0) no
 declare void @llvm.x86.sse2.clflush(i8*) nounwind readnone
 
 define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpeq_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpeq_epi8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpeq_epi8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp eq <16 x i8> %arg0, %arg1
@@ -423,15 +439,21 @@ define <2 x i64> @test_mm_cmpeq_epi8(<2
 }
 
 define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpeq_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpeq_epi16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpeq_epi16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqw %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2w %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp eq <8 x i16> %arg0, %arg1
@@ -441,15 +463,21 @@ define <2 x i64> @test_mm_cmpeq_epi16(<2
 }
 
 define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpeq_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpeq_epi32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpeq_epi32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2d %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp eq <4 x i32> %arg0, %arg1
@@ -459,15 +487,21 @@ define <2 x i64> @test_mm_cmpeq_epi32(<2
 }
 
 define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpeq_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpeqpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpeqpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpeqpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpeq_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpeq_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpeqpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp oeq <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -475,32 +509,37 @@ define <2 x double> @test_mm_cmpeq_pd(<2
 }
 
 define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpeq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpeqsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpeqsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpeqsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpeq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
 
 define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpge_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmplepd %xmm0, %xmm1
-; X32-NEXT:    movapd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpge_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmplepd %xmm0, %xmm1
-; X64-NEXT:    movapd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpge_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmplepd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpge_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmplepd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpge_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmplepd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp ole <2 x double> %a1, %a0
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -508,17 +547,23 @@ define <2 x double> @test_mm_cmpge_pd(<2
 }
 
 define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpge_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmplesd %xmm0, %xmm1
-; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpge_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmplesd %xmm0, %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpge_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmplesd %xmm0, %xmm1
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpge_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmplesd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpge_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmplesd %xmm0, %xmm1, %xmm1
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 2)
   %ext0 = extractelement <2 x double> %cmp, i32 0
   %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
@@ -528,15 +573,21 @@ define <2 x double> @test_mm_cmpge_sd(<2
 }
 
 define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpgt_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpgt_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpgt_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpgt_epi8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_epi8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtb %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp sgt <16 x i8> %arg0, %arg1
@@ -546,15 +597,21 @@ define <2 x i64> @test_mm_cmpgt_epi8(<2
 }
 
 define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpgt_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpgt_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpgt_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpgt_epi16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_epi16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2w %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp sgt <8 x i16> %arg0, %arg1
@@ -564,15 +621,21 @@ define <2 x i64> @test_mm_cmpgt_epi16(<2
 }
 
 define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmpgt_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpgt_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpgt_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpgt_epi32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_epi32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2d %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp sgt <4 x i32> %arg0, %arg1
@@ -582,17 +645,22 @@ define <2 x i64> @test_mm_cmpgt_epi32(<2
 }
 
 define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpgt_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpltpd %xmm0, %xmm1
-; X32-NEXT:    movapd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpgt_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpltpd %xmm0, %xmm1
-; X64-NEXT:    movapd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpgt_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpltpd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpgt_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp olt <2 x double> %a1, %a0
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -600,17 +668,23 @@ define <2 x double> @test_mm_cmpgt_pd(<2
 }
 
 define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpgt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpltsd %xmm0, %xmm1
-; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpgt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpltsd %xmm0, %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpgt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpltsd %xmm0, %xmm1
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpgt_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpltsd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpltsd %xmm0, %xmm1, %xmm1
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 1)
   %ext0 = extractelement <2 x double> %cmp, i32 0
   %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
@@ -620,15 +694,21 @@ define <2 x double> @test_mm_cmpgt_sd(<2
 }
 
 define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmple_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmplepd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmple_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmplepd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmple_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmplepd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmple_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmplepd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmple_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmplepd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp ole <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -636,31 +716,36 @@ define <2 x double> @test_mm_cmple_pd(<2
 }
 
 define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmple_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmplesd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmple_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmplesd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmple_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmplesd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmple_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmplesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 2)
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmplt_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtb %xmm0, %xmm1
-; X32-NEXT:    movdqa %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmplt_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtb %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmplt_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtb %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmplt_epi8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmplt_epi8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtb %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp sgt <16 x i8> %arg1, %arg0
@@ -670,17 +755,22 @@ define <2 x i64> @test_mm_cmplt_epi8(<2
 }
 
 define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmplt_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtw %xmm0, %xmm1
-; X32-NEXT:    movdqa %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmplt_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtw %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmplt_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtw %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmplt_epi16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmplt_epi16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtw %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2w %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp sgt <8 x i16> %arg1, %arg0
@@ -690,17 +780,22 @@ define <2 x i64> @test_mm_cmplt_epi16(<2
 }
 
 define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_cmplt_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpgtd %xmm0, %xmm1
-; X32-NEXT:    movdqa %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmplt_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpgtd %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmplt_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmplt_epi32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmplt_epi32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2d %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp sgt <4 x i32> %arg1, %arg0
@@ -710,15 +805,21 @@ define <2 x i64> @test_mm_cmplt_epi32(<2
 }
 
 define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmplt_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpltpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmplt_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpltpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmplt_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpltpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmplt_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpltpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmplt_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpltpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp olt <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -726,29 +827,35 @@ define <2 x double> @test_mm_cmplt_pd(<2
 }
 
 define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmplt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpltsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmplt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpltsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmplt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpltsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmplt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 1)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpneq_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpneqpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpneq_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpneqpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpneq_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpneqpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpneq_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpneqpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpneq_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpneqpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp une <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -756,31 +863,36 @@ define <2 x double> @test_mm_cmpneq_pd(<
 }
 
 define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpneq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpneqsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpneq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpneqsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpneq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpneqsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpneq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 4)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnge_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnlepd %xmm0, %xmm1
-; X32-NEXT:    movapd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnge_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnlepd %xmm0, %xmm1
-; X64-NEXT:    movapd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnge_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnlepd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpnge_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpnge_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnlepd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp ugt <2 x double> %a1, %a0
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -788,17 +900,23 @@ define <2 x double> @test_mm_cmpnge_pd(<
 }
 
 define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnge_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnlesd %xmm0, %xmm1
-; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnge_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnlesd %xmm0, %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnge_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnlesd %xmm0, %xmm1
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpnge_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnlesd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpnge_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnlesd %xmm0, %xmm1, %xmm1
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 6)
   %ext0 = extractelement <2 x double> %cmp, i32 0
   %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
@@ -808,17 +926,22 @@ define <2 x double> @test_mm_cmpnge_sd(<
 }
 
 define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpngt_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnltpd %xmm0, %xmm1
-; X32-NEXT:    movapd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpngt_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnltpd %xmm0, %xmm1
-; X64-NEXT:    movapd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpngt_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnltpd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpngt_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnltpd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpngt_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnltpd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp uge <2 x double> %a1, %a0
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -826,17 +949,23 @@ define <2 x double> @test_mm_cmpngt_pd(<
 }
 
 define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpngt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnltsd %xmm0, %xmm1
-; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpngt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnltsd %xmm0, %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpngt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnltsd %xmm0, %xmm1
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpngt_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnltsd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpngt_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnltsd %xmm0, %xmm1, %xmm1
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 5)
   %ext0 = extractelement <2 x double> %cmp, i32 0
   %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
@@ -846,15 +975,21 @@ define <2 x double> @test_mm_cmpngt_sd(<
 }
 
 define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnle_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnlepd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnle_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnlepd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnle_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnlepd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpnle_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnlepd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpnle_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnlepd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp ugt <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -862,29 +997,35 @@ define <2 x double> @test_mm_cmpnle_pd(<
 }
 
 define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnle_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnlesd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnle_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnlesd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnle_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnlesd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpnle_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 6)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnlt_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnltpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnlt_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnltpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnlt_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnltpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpnlt_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpnltpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpnlt_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpnltpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp uge <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -892,29 +1033,35 @@ define <2 x double> @test_mm_cmpnlt_pd(<
 }
 
 define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpnlt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpnltsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpnlt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpnltsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpnlt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpnltsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpnlt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 5)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpord_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpordpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpord_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpordpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpord_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpordpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpord_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpordpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpord_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpordpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp ord <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -922,29 +1069,35 @@ define <2 x double> @test_mm_cmpord_pd(<
 }
 
 define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpord_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpordsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpord_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpordsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpord_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpordsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpord_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpunord_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpunordpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpunord_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpunordpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpunord_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpunordpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_cmpunord_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vcmpunordpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpunord_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vcmpunordpd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %fcmp = fcmp uno <2 x double> %a0, %a1
   %sext = sext <2 x i1> %fcmp to <2 x i64>
   %res = bitcast <2 x i64> %sext to <2 x double>
@@ -952,151 +1105,151 @@ define <2 x double> @test_mm_cmpunord_pd
 }
 
 define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_cmpunord_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cmpunordsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpunord_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cmpunordsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpunord_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cmpunordsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpunord_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 3)
   ret <2 x double> %res
 }
 
 define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comieq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    comisd %xmm1, %xmm0
-; X32-NEXT:    setnp %al
-; X32-NEXT:    sete %cl
-; X32-NEXT:    andb %al, %cl
-; X32-NEXT:    movzbl %cl, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comieq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    comisd %xmm1, %xmm0
-; X64-NEXT:    setnp %al
-; X64-NEXT:    sete %cl
-; X64-NEXT:    andb %al, %cl
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comieq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    comisd %xmm1, %xmm0
+; SSE-NEXT:    setnp %al
+; SSE-NEXT:    sete %cl
+; SSE-NEXT:    andb %al, %cl
+; SSE-NEXT:    movzbl %cl, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comieq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcomisd %xmm1, %xmm0
+; AVX-NEXT:    setnp %al
+; AVX-NEXT:    sete %cl
+; AVX-NEXT:    andb %al, %cl
+; AVX-NEXT:    movzbl %cl, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_comige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comige_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    comisd %xmm1, %xmm0
-; X32-NEXT:    setae %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comige_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    comisd %xmm1, %xmm0
-; X64-NEXT:    setae %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comige_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    comisd %xmm1, %xmm0
+; SSE-NEXT:    setae %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comige_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vcomisd %xmm1, %xmm0
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_comigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comigt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    comisd %xmm1, %xmm0
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comigt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    comisd %xmm1, %xmm0
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comigt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    comisd %xmm1, %xmm0
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comigt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vcomisd %xmm1, %xmm0
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_comile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comile_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    comisd %xmm0, %xmm1
-; X32-NEXT:    setae %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comile_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    comisd %xmm0, %xmm1
-; X64-NEXT:    setae %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comile_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    comisd %xmm0, %xmm1
+; SSE-NEXT:    setae %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comile_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vcomisd %xmm0, %xmm1
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_comilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comilt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    comisd %xmm0, %xmm1
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comilt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    comisd %xmm0, %xmm1
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comilt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    comisd %xmm0, %xmm1
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comilt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vcomisd %xmm0, %xmm1
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_comineq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    comisd %xmm1, %xmm0
-; X32-NEXT:    setp %al
-; X32-NEXT:    setne %cl
-; X32-NEXT:    orb %al, %cl
-; X32-NEXT:    movzbl %cl, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_comineq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    comisd %xmm1, %xmm0
-; X64-NEXT:    setp %al
-; X64-NEXT:    setne %cl
-; X64-NEXT:    orb %al, %cl
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_comineq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    comisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    movzbl %cl, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_comineq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    movzbl %cl, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_cvtepi32_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtdq2pd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi32_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtdq2pd %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi32_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi32_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %ext = shufflevector <4 x i32> %arg0, <4 x i32> %arg0, <2 x i32> <i32 0, i32 1>
   %res = sitofp <2 x i32> %ext to <2 x double>
@@ -1104,30 +1257,30 @@ define <2 x double> @test_mm_cvtepi32_pd
 }
 
 define <4 x float> @test_mm_cvtepi32_ps(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_cvtepi32_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtdq2ps %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi32_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtdq2ps %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi32_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi32_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = sitofp <4 x i32> %arg0 to <4 x float>
   ret <4 x float> %res
 }
 
 define <2 x i64> @test_mm_cvtpd_epi32(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvtpd_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtpd2dq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtpd_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtpd2dq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtpd_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtpd_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtpd2dq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
@@ -1135,30 +1288,30 @@ define <2 x i64> @test_mm_cvtpd_epi32(<2
 declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
 
 define <4 x float> @test_mm_cvtpd_ps(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvtpd_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtpd2ps %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtpd_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtpd2ps %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtpd_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtpd_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtpd2ps %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_cvtps_epi32(<4 x float> %a0) nounwind {
-; X32-LABEL: test_mm_cvtps_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtps2dq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtps_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtps2dq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtps_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtps2dq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtps_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtps2dq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
@@ -1166,32 +1319,44 @@ define <2 x i64> @test_mm_cvtps_epi32(<4
 declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
 
 define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
-; X32-LABEL: test_mm_cvtps_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtps2pd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtps_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtps2pd %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtps_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtps_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
   %res = fpext <2 x float> %ext to <2 x double>
   ret <2 x double> %res
 }
 
 define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvtsd_f64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    movlps %xmm0, (%esp)
-; X32-NEXT:    fldl (%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cvtsd_f64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebp
+; X86-SSE-NEXT:    movl %esp, %ebp
+; X86-SSE-NEXT:    andl $-8, %esp
+; X86-SSE-NEXT:    subl $8, %esp
+; X86-SSE-NEXT:    movlps %xmm0, (%esp)
+; X86-SSE-NEXT:    fldl (%esp)
+; X86-SSE-NEXT:    movl %ebp, %esp
+; X86-SSE-NEXT:    popl %ebp
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_cvtsd_f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebp
+; X86-AVX-NEXT:    movl %esp, %ebp
+; X86-AVX-NEXT:    andl $-8, %esp
+; X86-AVX-NEXT:    subl $8, %esp
+; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
+; X86-AVX-NEXT:    fldl (%esp)
+; X86-AVX-NEXT:    movl %ebp, %esp
+; X86-AVX-NEXT:    popl %ebp
+; X86-AVX-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsd_f64:
 ; X64:       # %bb.0:
@@ -1201,91 +1366,130 @@ define double @test_mm_cvtsd_f64(<2 x do
 }
 
 define i32 @test_mm_cvtsd_si32(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvtsd_si32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtsd2si %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsd_si32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsd2si %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsd_si32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtsd2si %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtsd_si32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtsd2si %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
 
 define <4 x float> @test_mm_cvtsd_ss(<4 x float> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_cvtsd_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtsd2ss %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsd_ss:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsd2ss %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsd_ss:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtsd2ss %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtsd_ss:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
 
 define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, <2 x double>* %p1) {
-; X32-LABEL: test_mm_cvtsd_ss_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    cvtsd2ss (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsd_ss_load:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsd2ss (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_cvtsd_ss_load:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    cvtsd2ss (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_cvtsd_ss_load:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps (%eax), %xmm1
+; X86-AVX-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cvtsd_ss_load:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    cvtsd2ss (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cvtsd_ss_load:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm1
+; X64-AVX-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %a1 = load <2 x double>, <2 x double>* %p1
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
   ret <4 x float> %res
 }
 
 define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_cvtsi128_si32:
-; X32:       # %bb.0:
-; X32-NEXT:    movd %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsi128_si32:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtsi128_si32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtsi128_si32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = extractelement <4 x i32> %arg0, i32 0
   ret i32 %res
 }
 
 define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
-; X32-LABEL: test_mm_cvtsi32_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsi32_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtsi2sdl %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_cvtsi32_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test_mm_cvtsi32_sd:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test_mm_cvtsi32_sd:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    vcvtsi2sdl %eax, %xmm0, %xmm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cvtsi32_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    cvtsi2sdl %edi, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cvtsi32_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %cvt = sitofp i32 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
-; X32-LABEL: test_mm_cvtsi32_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtsi32_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_cvtsi32_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_cvtsi32_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cvtsi32_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cvtsi32_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %edi, %xmm0
+; X64-AVX-NEXT:    retq
   %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
   %res1 = insertelement <4 x i32> %res0, i32 0, i32 1
   %res2 = insertelement <4 x i32> %res1, i32 0, i32 2
@@ -1295,15 +1499,15 @@ define <2 x i64> @test_mm_cvtsi32_si128(
 }
 
 define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwind {
-; X32-LABEL: test_mm_cvtss_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    cvtss2sd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtss_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    cvtss2sd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtss_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvtss2sd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtss_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext = extractelement <4 x float> %a1, i32 0
   %cvt = fpext float %ext to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
@@ -1311,15 +1515,15 @@ define <2 x double> @test_mm_cvtss_sd(<2
 }
 
 define <2 x i64> @test_mm_cvttpd_epi32(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvttpd_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvttpd2dq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvttpd_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttpd2dq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvttpd_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvttpd_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttpd2dq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
@@ -1327,15 +1531,15 @@ define <2 x i64> @test_mm_cvttpd_epi32(<
 declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_cvttps_epi32(<4 x float> %a0) nounwind {
-; X32-LABEL: test_mm_cvttps_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvttps2dq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvttps_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttps2dq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvttps_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttps2dq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvttps_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
@@ -1343,44 +1547,44 @@ define <2 x i64> @test_mm_cvttps_epi32(<
 declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
 
 define i32 @test_mm_cvttsd_si32(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_cvttsd_si32:
-; X32:       # %bb.0:
-; X32-NEXT:    cvttsd2si %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvttsd_si32:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttsd2si %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvttsd_si32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvttsd_si32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_div_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    divpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_div_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    divpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_div_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    divpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_div_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vdivpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = fdiv <2 x double> %a0, %a1
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_div_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    divsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_div_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    divsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_div_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_div_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext0 = extractelement <2 x double> %a0, i32 0
   %ext1 = extractelement <2 x double> %a1, i32 0
   %fdiv = fdiv double %ext0, %ext1
@@ -1389,17 +1593,17 @@ define <2 x double> @test_mm_div_sd(<2 x
 }
 
 define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_extract_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pextrw $1, %xmm0, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pextrw $1, %xmm0, %eax
-; X64-NEXT:    movzwl %ax, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_extract_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $1, %xmm0, %eax
+; SSE-NEXT:    movzwl %ax, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_extract_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    movzwl %ax, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %ext = extractelement <8 x i16> %arg0, i32 1
   %res = zext i16 %ext to i32
@@ -1407,16 +1611,27 @@ define i32 @test_mm_extract_epi16(<2 x i
 }
 
 define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
-; X32-LABEL: test_mm_insert_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pinsrw $1, %eax, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pinsrw $1, %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_insert_epi16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pinsrw $1, %eax, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_insert_epi16:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_insert_epi16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pinsrw $1, %edi, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_insert_epi16:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = insertelement <8 x i16> %arg0, i16 %a1, i32 1
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -1424,47 +1639,64 @@ define <2 x i64> @test_mm_insert_epi16(<
 }
 
 define void @test_mm_lfence() nounwind {
-; X32-LABEL: test_mm_lfence:
-; X32:       # %bb.0:
-; X32-NEXT:    lfence
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_lfence:
-; X64:       # %bb.0:
-; X64-NEXT:    lfence
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_lfence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lfence
+; CHECK-NEXT:    ret{{[l|q]}}
   call void @llvm.x86.sse2.lfence()
   ret void
 }
 declare void @llvm.x86.sse2.lfence() nounwind readnone
 
 define <2 x double> @test_mm_load_pd(double* %a0) nounwind {
-; X32-LABEL: test_mm_load_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movaps (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_load_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_load_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_load_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_load_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_load_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   %res = load <2 x double>, <2 x double>* %arg0, align 16
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_load_sd(double* %a0) nounwind {
-; X32-LABEL: test_mm_load_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_load_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_load_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_load_sd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_load_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_load_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT:    retq
   %ld = load double, double* %a0, align 1
   %res0 = insertelement <2 x double> undef, double %ld, i32 0
   %res1 = insertelement <2 x double> %res0, double 0.0, i32 1
@@ -1472,33 +1704,55 @@ define <2 x double> @test_mm_load_sd(dou
 }
 
 define <2 x i64> @test_mm_load_si128(<2 x i64>* %a0) nounwind {
-; X32-LABEL: test_mm_load_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movaps (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_load_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_load_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_load_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_load_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_load_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %res = load <2 x i64>, <2 x i64>* %a0, align 16
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_load1_pd(double* %a0) nounwind {
-; X32-LABEL: test_mm_load1_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_load1_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_load1_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_load1_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_load1_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_load1_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-AVX-NEXT:    retq
   %ld = load double, double* %a0, align 8
   %res0 = insertelement <2 x double> undef, double %ld, i32 0
   %res1 = insertelement <2 x double> %res0, double %ld, i32 1
@@ -1506,32 +1760,54 @@ define <2 x double> @test_mm_load1_pd(do
 }
 
 define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, double* %a1) nounwind {
-; X32-LABEL: test_mm_loadh_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadh_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadh_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadh_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadh_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadh_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X64-AVX-NEXT:    retq
   %ld = load double, double* %a1, align 8
   %res = insertelement <2 x double> %a0, double %ld, i32 1
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
-; X32-LABEL: test_mm_loadl_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadl_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadl_epi64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadl_epi64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadl_epi64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadl_epi64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT:    retq
   %bc = bitcast <2 x i64>* %a1 to i64*
   %ld = load i64, i64* %bc, align 1
   %res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
@@ -1540,34 +1816,56 @@ define <2 x i64> @test_mm_loadl_epi64(<2
 }
 
 define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, double* %a1) nounwind {
-; X32-LABEL: test_mm_loadl_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadl_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadl_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadl_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadl_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadl_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X64-AVX-NEXT:    retq
   %ld = load double, double* %a1, align 8
   %res = insertelement <2 x double> %a0, double %ld, i32 0
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_loadr_pd(double* %a0) nounwind {
-; X32-LABEL: test_mm_loadr_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movapd (%eax), %xmm0
-; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadr_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movapd (%rdi), %xmm0
-; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadr_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movapd (%eax), %xmm0
+; X86-SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadr_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = mem[1,0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadr_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movapd (%rdi), %xmm0
+; X64-SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadr_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = mem[1,0]
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   %ld = load <2 x double>, <2 x double>* %arg0, align 16
   %res = shufflevector <2 x double> %ld, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -1575,46 +1873,68 @@ define <2 x double> @test_mm_loadr_pd(do
 }
 
 define <2 x double> @test_mm_loadu_pd(double* %a0) nounwind {
-; X32-LABEL: test_mm_loadu_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movups (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadu_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movups (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadu_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movups (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadu_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups (%eax), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadu_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movups (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadu_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   %res = load <2 x double>, <2 x double>* %arg0, align 1
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_loadu_si128(<2 x i64>* %a0) nounwind {
-; X32-LABEL: test_mm_loadu_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movups (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_loadu_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movups (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_loadu_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movups (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_loadu_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups (%eax), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_loadu_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movups (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_loadu_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %res = load <2 x i64>, <2 x i64>* %a0, align 1
   ret <2 x i64> %res
 }
 
 define <2 x i64> @test_mm_madd_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_madd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaddwd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_madd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaddwd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_madd_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaddwd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_madd_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -1624,18 +1944,31 @@ define <2 x i64> @test_mm_madd_epi16(<2
 declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
 
 define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, i8* %a2) nounwind {
-; X32-LABEL: test_mm_maskmoveu_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    maskmovdqu %xmm1, %xmm0
-; X32-NEXT:    popl %edi
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maskmoveu_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    maskmovdqu %xmm1, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_maskmoveu_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %edi
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-SSE-NEXT:    maskmovdqu %xmm1, %xmm0
+; X86-SSE-NEXT:    popl %edi
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_maskmoveu_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %edi
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
+; X86-AVX-NEXT:    popl %edi
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_maskmoveu_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    maskmovdqu %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_maskmoveu_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %arg0, <16 x i8> %arg1, i8* %a2)
@@ -1644,15 +1977,15 @@ define void @test_mm_maskmoveu_si128(<2
 declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind
 
 define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_max_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxsw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxsw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxsw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp sgt <8 x i16> %arg0, %arg1
@@ -1662,15 +1995,15 @@ define <2 x i64> @test_mm_max_epi16(<2 x
 }
 
 define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_max_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxub %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxub %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxub %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp ugt <16 x i8> %arg0, %arg1
@@ -1680,60 +2013,55 @@ define <2 x i64> @test_mm_max_epu8(<2 x
 }
 
 define <2 x double> @test_mm_max_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_max_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    maxpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    maxpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    maxpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_max_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_max_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    maxsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    maxsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    maxsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define void @test_mm_mfence() nounwind {
-; X32-LABEL: test_mm_mfence:
-; X32:       # %bb.0:
-; X32-NEXT:    mfence
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mfence:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_mfence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mfence
+; CHECK-NEXT:    ret{{[l|q]}}
   call void @llvm.x86.sse2.mfence()
   ret void
 }
 declare void @llvm.x86.sse2.mfence() nounwind readnone
 
 define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_min_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pminsw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pminsw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminsw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp slt <8 x i16> %arg0, %arg1
@@ -1743,15 +2071,15 @@ define <2 x i64> @test_mm_min_epi16(<2 x
 }
 
 define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_min_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    pminub %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    pminub %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminub %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp ult <16 x i8> %arg0, %arg1
@@ -1761,59 +2089,64 @@ define <2 x i64> @test_mm_min_epu8(<2 x
 }
 
 define <2 x double> @test_mm_min_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_min_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    minpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    minpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    minpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_min_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_min_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    minsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    minsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    minsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_move_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_move_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_move_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_move_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_move_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_move_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_move_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_move_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_move_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %ext0 = extractelement <2 x double> %a1, i32 0
   %res0 = insertelement <2 x double> undef, double %ext0, i32 0
   %ext1 = extractelement <2 x double> %a0, i32 1
@@ -1822,15 +2155,15 @@ define <2 x double> @test_mm_move_sd(<2
 }
 
 define i32 @test_mm_movemask_epi8(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_movemask_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovmskb %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_movemask_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovmskb %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_movemask_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_movemask_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovmskb %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %arg0)
   ret i32 %res
@@ -1838,36 +2171,44 @@ define i32 @test_mm_movemask_epi8(<2 x i
 declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
 
 define i32 @test_mm_movemask_pd(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_movemask_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movmskpd %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_movemask_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movmskpd %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_movemask_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movmskpd %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_movemask_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovmskpd %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_mul_epu32:
-; X32:       # %bb.0:
-; X32-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
-; X32-NEXT:    pand %xmm2, %xmm0
-; X32-NEXT:    pand %xmm2, %xmm1
-; X32-NEXT:    pmuludq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mul_epu32:
-; X64:       # %bb.0:
-; X64-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
-; X64-NEXT:    pand %xmm2, %xmm0
-; X64-NEXT:    pand %xmm2, %xmm1
-; X64-NEXT:    pmuludq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mul_epu32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_mul_epu32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_mul_epu32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %A = and <2 x i64> %a0, <i64 4294967295, i64 4294967295>
   %B = and <2 x i64> %a1, <i64 4294967295, i64 4294967295>
   %res = mul nuw <2 x i64> %A, %B
@@ -1875,29 +2216,29 @@ define <2 x i64> @test_mm_mul_epu32(<2 x
 }
 
 define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_mul_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    mulpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mul_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    mulpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mul_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mulpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mul_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = fmul <2 x double> %a0, %a1
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_mul_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    mulsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mul_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    mulsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mul_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mulsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mul_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext0 = extractelement <2 x double> %a0, i32 0
   %ext1 = extractelement <2 x double> %a1, i32 0
   %fmul = fmul double %ext0, %ext1
@@ -1906,15 +2247,15 @@ define <2 x double> @test_mm_mul_sd(<2 x
 }
 
 define <2 x i64> @test_mm_mulhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mulhi_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmulhw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mulhi_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmulhw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mulhi_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmulhw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mulhi_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -1924,15 +2265,15 @@ define <2 x i64> @test_mm_mulhi_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_mulhi_epu16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mulhi_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmulhuw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mulhi_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmulhuw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mulhi_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmulhuw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mulhi_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -1942,15 +2283,15 @@ define <2 x i64> @test_mm_mulhi_epu16(<2
 declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mullo_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmullw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mullo_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmullw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mullo_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmullw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mullo_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = mul <8 x i16> %arg0, %arg1
@@ -1959,15 +2300,15 @@ define <2 x i64> @test_mm_mullo_epi16(<2
 }
 
 define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_or_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    orps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_or_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    orps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_or_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_or_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
   %arg1 = bitcast <2 x double> %a1 to <4 x i32>
   %res = or <4 x i32> %arg0, %arg1
@@ -1976,29 +2317,29 @@ define <2 x double> @test_mm_or_pd(<2 x
 }
 
 define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_or_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    orps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_or_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    orps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_or_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_or_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = or <2 x i64> %a0, %a1
   ret <2 x i64> %res
 }
 
 define <2 x i64> @test_mm_packs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_packs_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    packsswb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_packs_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    packsswb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_packs_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_packs_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -2008,15 +2349,15 @@ define <2 x i64> @test_mm_packs_epi16(<2
 declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_packs_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_packs_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    packssdw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_packs_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    packssdw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_packs_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_packs_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -2026,15 +2367,15 @@ define <2 x i64> @test_mm_packs_epi32(<2
 declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_packus_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_packus_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    packuswb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_packus_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    packuswb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_packus_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_packus_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -2044,30 +2385,25 @@ define <2 x i64> @test_mm_packus_epi16(<
 declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
 
 define void @test_mm_pause() nounwind {
-; X32-LABEL: test_mm_pause:
-; X32:       # %bb.0:
-; X32-NEXT:    pause
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_pause:
-; X64:       # %bb.0:
-; X64-NEXT:    pause
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_pause:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pause
+; CHECK-NEXT:    ret{{[l|q]}}
   call void @llvm.x86.sse2.pause()
   ret void
 }
 declare void @llvm.x86.sse2.pause() nounwind readnone
 
 define <2 x i64> @test_mm_sad_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_sad_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    psadbw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sad_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    psadbw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sad_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psadbw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sad_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -2075,108 +2411,180 @@ define <2 x i64> @test_mm_sad_epu8(<2 x
 }
 declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
 
-define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
-; X32-LABEL: test_mm_set_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm4
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl %sil, %eax
-; X64-NEXT:    movd %eax, %xmm1
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    movzbl %dl, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X64-NEXT:    movzbl %r8b, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl %r9b, %eax
-; X64-NEXT:    movd %eax, %xmm3
-; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm1
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm3
-; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm4
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
+; X86-SSE-LABEL: test_mm_set_epi8:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm4
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_epi8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_epi8:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzbl %dil, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl %sil, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm1
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl %dl, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl %cl, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-SSE-NEXT:    movzbl %r8b, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl %r9b, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm3
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm1
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm3
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm4
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_epi8:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vmovd %eax, %xmm0
+; X64-AVX-NEXT:    vpinsrb $1, %r10d, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %r9b, %eax
+; X64-AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %r8b, %eax
+; X64-AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %cl, %eax
+; X64-AVX-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %dl, %eax
+; X64-AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %sil, %eax
+; X64-AVX-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %dil, %eax
+; X64-AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <16 x i8> undef,  i8 %a15, i32 0
   %res1  = insertelement <16 x i8> %res0,  i8 %a14, i32 1
   %res2  = insertelement <16 x i8> %res1,  i8 %a13, i32 2
@@ -2198,53 +2606,87 @@ define <2 x i64> @test_mm_set_epi8(i8 %a
 }
 
 define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
-; X32-LABEL: test_mm_set_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm4
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm5
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm6
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm7
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; X32-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    movd %esi, %xmm1
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT:    movd %edx, %xmm0
-; X64-NEXT:    movd %ecx, %xmm2
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X64-NEXT:    movd %r8d, %xmm0
-; X64-NEXT:    movd %r9d, %xmm1
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT:    movd %eax, %xmm3
-; X64-NEXT:    movd %r10d, %xmm0
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_epi16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm4
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm5
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm6
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm7
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_epi16:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovd %eax, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_epi16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    movd %esi, %xmm1
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    movd %edx, %xmm0
+; X64-SSE-NEXT:    movd %ecx, %xmm2
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-SSE-NEXT:    movd %r8d, %xmm0
+; X64-SSE-NEXT:    movd %r9d, %xmm1
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    movd %eax, %xmm3
+; X64-SSE-NEXT:    movd %r10d, %xmm0
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_epi16:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; X64-AVX-NEXT:    vmovd %eax, %xmm0
+; X64-AVX-NEXT:    vpinsrw $1, %r10d, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $2, %r9d, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $3, %r8d, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $5, %edx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $7, %edi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <8 x i16> undef, i16 %a7, i32 0
   %res1  = insertelement <8 x i16> %res0, i16 %a6, i32 1
   %res2  = insertelement <8 x i16> %res1, i16 %a5, i32 2
@@ -2258,27 +2700,43 @@ define <2 x i64> @test_mm_set_epi16(i16
 }
 
 define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_set_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    movd %esi, %xmm1
-; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X64-NEXT:    movd %edx, %xmm2
-; X64-NEXT:    movd %ecx, %xmm0
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_epi32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_epi32:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_epi32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    movd %esi, %xmm1
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-SSE-NEXT:    movd %edx, %xmm2
+; X64-SSE-NEXT:    movd %ecx, %xmm0
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_epi32:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %ecx, %xmm0
+; X64-AVX-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <4 x i32> undef, i32 %a3, i32 0
   %res1  = insertelement <4 x i32> %res0, i32 %a2, i32 1
   %res2  = insertelement <4 x i32> %res1, i32 %a1, i32 2
@@ -2290,96 +2748,172 @@ define <2 x i64> @test_mm_set_epi32(i32
 ; TODO test_mm_set_epi64
 
 define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
-; X32-LABEL: test_mm_set_epi64x:
-; X32:       # %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_epi64x:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %xmm1
-; X64-NEXT:    movq %rsi, %xmm0
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_epi64x:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_epi64x:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_epi64x:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %rdi, %xmm1
+; X64-SSE-NEXT:    movq %rsi, %xmm0
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_epi64x:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %rdi, %xmm0
+; X64-AVX-NEXT:    vmovq %rsi, %xmm1
+; X64-AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x i64> undef, i64 %a1, i32 0
   %res1  = insertelement <2 x i64> %res0, i64 %a0, i32 1
   ret <2 x i64> %res1
 }
 
 define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
-; X32-LABEL: test_mm_set_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; X64-NEXT:    movaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT:    movaps %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a1, i32 0
   %res1  = insertelement <2 x double> %res0, double %a0, i32 1
   ret <2 x double> %res1
 }
 
 define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
-; X32-LABEL: test_mm_set_pd1:
-; X32:       # %bb.0:
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_pd1:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_pd1:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_pd1:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_pd1:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_pd1:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
   %res1  = insertelement <2 x double> %res0, double %a0, i32 1
   ret <2 x double> %res1
 }
 
 define <2 x double> @test_mm_set_sd(double %a0) nounwind {
-; X32-LABEL: test_mm_set_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set_sd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
   %res1  = insertelement <2 x double> %res0, double 0.0, i32 1
   ret <2 x double> %res1
 }
 
 define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
-; X32-LABEL: test_mm_set1_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set1_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set1_epi8:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X86-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test_mm_set1_epi8:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    vmovd %eax, %xmm0
+; X86-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test_mm_set1_epi8:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-AVX512-NEXT:    vpbroadcastb %eax, %xmm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set1_epi8:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzbl %dil, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test_mm_set1_epi8:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    movzbl %dil, %eax
+; X64-AVX1-NEXT:    vmovd %eax, %xmm0
+; X64-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test_mm_set1_epi8:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vpbroadcastb %edi, %xmm0
+; X64-AVX512-NEXT:    retq
   %res0  = insertelement <16 x i8> undef,  i8 %a0, i32 0
   %res1  = insertelement <16 x i8> %res0,  i8 %a0, i32 1
   %res2  = insertelement <16 x i8> %res1,  i8 %a0, i32 2
@@ -2401,20 +2935,46 @@ define <2 x i64> @test_mm_set1_epi8(i8 %
 }
 
 define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
-; X32-LABEL: test_mm_set1_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set1_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set1_epi16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test_mm_set1_epi16:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    vmovd %eax, %xmm0
+; X86-AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test_mm_set1_epi16:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    vpbroadcastw %eax, %xmm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set1_epi16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test_mm_set1_epi16:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vmovd %edi, %xmm0
+; X64-AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test_mm_set1_epi16:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vpbroadcastw %edi, %xmm0
+; X64-AVX512-NEXT:    retq
   %res0  = insertelement <8 x i16> undef, i16 %a0, i32 0
   %res1  = insertelement <8 x i16> %res0, i16 %a0, i32 1
   %res2  = insertelement <8 x i16> %res1, i16 %a0, i32 2
@@ -2428,17 +2988,40 @@ define <2 x i64> @test_mm_set1_epi16(i16
 }
 
 define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
-; X32-LABEL: test_mm_set1_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set1_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set1_epi32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test_mm_set1_epi32:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test_mm_set1_epi32:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    vpbroadcastd %eax, %xmm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set1_epi32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test_mm_set1_epi32:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vmovd %edi, %xmm0
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test_mm_set1_epi32:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vpbroadcastd %edi, %xmm0
+; X64-AVX512-NEXT:    retq
   %res0  = insertelement <4 x i32> undef, i32 %a0, i32 0
   %res1  = insertelement <4 x i32> %res0, i32 %a0, i32 1
   %res2  = insertelement <4 x i32> %res1, i32 %a0, i32 2
@@ -2450,142 +3033,253 @@ define <2 x i64> @test_mm_set1_epi32(i32
 ; TODO test_mm_set1_epi64
 
 define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
-; X32-LABEL: test_mm_set1_epi64x:
-; X32:       # %bb.0:
-; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set1_epi64x:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %xmm0
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set1_epi64x:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test_mm_set1_epi64x:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX1-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test_mm_set1_epi64x:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX512-NEXT:    vpbroadcastq %xmm0, %xmm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set1_epi64x:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %rdi, %xmm0
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test_mm_set1_epi64x:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vmovq %rdi, %xmm0
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test_mm_set1_epi64x:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vpbroadcastq %rdi, %xmm0
+; X64-AVX512-NEXT:    retq
   %res0  = insertelement <2 x i64> undef, i64 %a0, i32 0
   %res1  = insertelement <2 x i64> %res0, i64 %a0, i32 1
   ret <2 x i64> %res1
 }
 
 define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
-; X32-LABEL: test_mm_set1_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_set1_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_set1_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_set1_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_set1_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_set1_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
   %res1  = insertelement <2 x double> %res0, double %a0, i32 1
   ret <2 x double> %res1
 }
 
 define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
-; X32-LABEL: test_mm_setr_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm4
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setr_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm1
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm3
-; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm1
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    movzbl %r9b, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl %r8b, %eax
-; X64-NEXT:    movd %eax, %xmm3
-; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movzbl %dl, %eax
-; X64-NEXT:    movd %eax, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X64-NEXT:    movzbl %sil, %eax
-; X64-NEXT:    movd %eax, %xmm4
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_setr_epi8:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm4
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_setr_epi8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_setr_epi8:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm1
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm3
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm1
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl %r9b, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl %r8b, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm3
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X64-SSE-NEXT:    movzbl %cl, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movzbl %dl, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm2
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-SSE-NEXT:    movzbl %sil, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm4
+; X64-SSE-NEXT:    movzbl %dil, %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_setr_epi8:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movzbl %sil, %eax
+; X64-AVX-NEXT:    movzbl %dil, %esi
+; X64-AVX-NEXT:    vmovd %esi, %xmm0
+; X64-AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %dl, %eax
+; X64-AVX-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %cl, %eax
+; X64-AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %r8b, %eax
+; X64-AVX-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl %r9b, %eax
+; X64-AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <16 x i8> undef,  i8 %a0 , i32 0
   %res1  = insertelement <16 x i8> %res0,  i8 %a1 , i32 1
   %res2  = insertelement <16 x i8> %res1,  i8 %a2 , i32 2
@@ -2607,53 +3301,87 @@ define <2 x i64> @test_mm_setr_epi8(i8 %
 }
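 ; NOTE: _mm_setr_epi8 takes its sixteen bytes in memory order, so the IR above
 ; is a chain of insertelements at ascending lane indices. Without SSE4.1's
 ; pinsrb, the SSE lowerings build the vector bottom-up through a log2-depth
 ; punpcklbw/punpcklwd/punpckldq/punpcklqdq tree, while the AVX targets emit a
 ; single vpinsrb chain. A minimal C sketch of the intrinsic under test
 ; (hypothetical usage, not taken from this file):
 ;   __m128i v = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
 ;                             8, 9, 10, 11, 12, 13, 14, 15);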
 
 define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
-; X32-LABEL: test_mm_setr_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm1
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm2
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm3
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm4
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm5
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm6
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm7
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movd %eax, %xmm0
-; X32-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; X32-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setr_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    movd %r10d, %xmm1
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT:    movd %r9d, %xmm0
-; X64-NEXT:    movd %r8d, %xmm2
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X64-NEXT:    movd %ecx, %xmm0
-; X64-NEXT:    movd %edx, %xmm1
-; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT:    movd %esi, %xmm3
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_setr_epi16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm1
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm2
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm3
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm4
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm5
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm6
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm7
+; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_setr_epi16:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovd %eax, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_setr_epi16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    movd %r10d, %xmm1
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    movd %r9d, %xmm0
+; X64-SSE-NEXT:    movd %r8d, %xmm2
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-SSE-NEXT:    movd %ecx, %xmm0
+; X64-SSE-NEXT:    movd %edx, %xmm1
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    movd %esi, %xmm3
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_setr_epi16:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; X64-AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; X64-AVX-NEXT:    vmovd %edi, %xmm0
+; X64-AVX-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrw $7, %r10d, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <8 x i16> undef, i16 %a0, i32 0
   %res1  = insertelement <8 x i16> %res0, i16 %a1, i32 1
   %res2  = insertelement <8 x i16> %res1, i16 %a2, i32 2
@@ -2667,27 +3395,43 @@ define <2 x i64> @test_mm_setr_epi16(i16
 }
 
 define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_setr_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setr_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %ecx, %xmm0
-; X64-NEXT:    movd %edx, %xmm1
-; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X64-NEXT:    movd %esi, %xmm2
-; X64-NEXT:    movd %edi, %xmm0
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_setr_epi32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_setr_epi32:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_setr_epi32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %ecx, %xmm0
+; X64-SSE-NEXT:    movd %edx, %xmm1
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-SSE-NEXT:    movd %esi, %xmm2
+; X64-SSE-NEXT:    movd %edi, %xmm0
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_setr_epi32:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %edi, %xmm0
+; X64-AVX-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <4 x i32> undef, i32 %a0, i32 0
   %res1  = insertelement <4 x i32> %res0, i32 %a1, i32 1
   %res2  = insertelement <4 x i32> %res1, i32 %a2, i32 2
@@ -2699,81 +3443,113 @@ define <2 x i64> @test_mm_setr_epi32(i32
 ; TODO test_mm_setr_epi64
 
 define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
-; X32-LABEL: test_mm_setr_epi64x:
-; X32:       # %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setr_epi64x:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %xmm1
-; X64-NEXT:    movq %rdi, %xmm0
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_setr_epi64x:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_setr_epi64x:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_setr_epi64x:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %rsi, %xmm1
+; X64-SSE-NEXT:    movq %rdi, %xmm0
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_setr_epi64x:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %rsi, %xmm0
+; X64-AVX-NEXT:    vmovq %rdi, %xmm1
+; X64-AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x i64> undef, i64 %a0, i32 0
   %res1  = insertelement <2 x i64> %res0, i64 %a1, i32 1
   ret <2 x i64> %res1
 }
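 ; NOTE: in 32-bit mode there are no 64-bit GPRs and vpinsrq is not encodable,
 ; so the X86-AVX path assembles each i64 element from two 32-bit halves with
 ; vpinsrd; the X64-AVX path can move the whole registers with vmovq and join
 ; them with a single vpunpcklqdq.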
 
 define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
-; X32-LABEL: test_mm_setr_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setr_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_setr_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_setr_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_setr_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_setr_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
   %res1  = insertelement <2 x double> %res0, double %a1, i32 1
   ret <2 x double> %res1
 }
 
 define <2 x double> @test_mm_setzero_pd() {
-; X32-LABEL: test_mm_setzero_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setzero_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorps %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_setzero_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_setzero_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   ret <2 x double> zeroinitializer
 }
 
 define <2 x i64> @test_mm_setzero_si128() {
-; X32-LABEL: test_mm_setzero_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_setzero_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    xorps %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_setzero_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_setzero_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   ret <2 x i64> zeroinitializer
 }
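 ; NOTE: both setzero tests reach the same zeroing idiom (xorps / vxorps), so a
 ; shared SSE/AVX prefix pair covers the 32-bit and 64-bit RUN lines at once;
 ; the ret{{[l|q]}} regex is what lets one CHECK block match both the retl and
 ; retq epilogues.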
 
 define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_shuffle_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shuffle_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_shuffle_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_shuffle_epi32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_shuffle_epi32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -2781,29 +3557,29 @@ define <2 x i64> @test_mm_shuffle_epi32(
 }
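 ; NOTE: the all-zeros shuffle mask is a splat of element 0, and each target
 ; picks its cheapest encoding: pshufd on SSE, the float-domain vpermilps on
 ; AVX1, and a plain vbroadcastss on AVX512; all three yield xmm0[0,0,0,0].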
 
 define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_shuffle_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shuffle_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_shuffle_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_shuffle_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_shufflehi_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shufflehi_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_shufflehi_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_shufflehi_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -2811,15 +3587,15 @@ define <2 x i64> @test_mm_shufflehi_epi1
 }
 
 define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_shufflelo_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shufflelo_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_shufflelo_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_shufflelo_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -2827,15 +3603,15 @@ define <2 x i64> @test_mm_shufflelo_epi1
 }
 
 define <2 x i64> @test_mm_sll_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sll_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psllw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sll_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psllw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sll_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sll_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -2845,15 +3621,15 @@ define <2 x i64> @test_mm_sll_epi16(<2 x
 declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_sll_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sll_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pslld %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sll_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pslld %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sll_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pslld %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sll_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -2863,30 +3639,30 @@ define <2 x i64> @test_mm_sll_epi32(<2 x
 declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_sll_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sll_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    psllq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sll_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    psllq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sll_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sll_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
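 ; NOTE: the shift tests come in pairs: the two-vector intrinsics (e.g.
 ; @llvm.x86.sse2.psll.w) shift every element by the count held in the low
 ; 64 bits of the second operand, while the *i variants (e.g.
 ; @llvm.x86.sse2.pslli.w) take the count as an immediate i32.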
 
 define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_slli_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psllw $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_slli_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psllw $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_slli_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_slli_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -2895,15 +3671,15 @@ define <2 x i64> @test_mm_slli_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_slli_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pslld $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_slli_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pslld $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_slli_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pslld $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_slli_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -2912,30 +3688,30 @@ define <2 x i64> @test_mm_slli_epi32(<2
 declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_slli_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    psllq $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_slli_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    psllq $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_slli_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllq $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_slli_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllq $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_slli_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_slli_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_slli_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_slli_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
   %bc = bitcast <16 x i8> %res to <2 x i64>
@@ -2943,32 +3719,31 @@ define <2 x i64> @test_mm_slli_si128(<2
 }
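 ; NOTE: _mm_slli_si128 is a whole-register byte shift, expressed in IR as a
 ; shufflevector against zeroinitializer: mask indices 11..15 select five zero
 ; bytes and 16..26 select bytes 0..10 of %a0, i.e. a left shift by 5 bytes,
 ; which matches the pslldq immediate in the checks.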
 
 define <2 x double> @test_mm_sqrt_pd(<2 x double> %a0) nounwind {
-; X32-LABEL: test_mm_sqrt_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    sqrtpd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sqrt_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    sqrtpd %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sqrt_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    sqrtpd %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sqrt_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsqrtpd %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_sqrt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    sqrtsd %xmm0, %xmm1
-; X32-NEXT:    movapd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sqrt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    sqrtsd %xmm0, %xmm1
-; X64-NEXT:    movapd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sqrt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    sqrtsd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sqrt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %call = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
   %ext0 = extractelement <2 x double> %call, i32 0
   %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
@@ -2979,15 +3754,15 @@ define <2 x double> @test_mm_sqrt_sd(<2
 declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
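 ; NOTE: the AVX form folds the extract/insert merge into vsqrtsd itself:
 ; "vsqrtsd %xmm0, %xmm1, %xmm0" writes the sqrt of the low element of %xmm0
 ; into the low lane and copies the upper lane from %xmm1 (here %a1), saving
 ; the movapd that the two-operand SSE sqrtsd needs.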
 
 define <2 x i64> @test_mm_sra_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sra_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psraw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sra_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psraw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sra_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psraw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sra_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -2997,15 +3772,15 @@ define <2 x i64> @test_mm_sra_epi16(<2 x
 declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_sra_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sra_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psrad %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sra_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psrad %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sra_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrad %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sra_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -3015,15 +3790,15 @@ define <2 x i64> @test_mm_sra_epi32(<2 x
 declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_srai_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_srai_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psraw $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srai_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psraw $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srai_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psraw $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srai_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %arg0, i32 1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -3032,15 +3807,15 @@ define <2 x i64> @test_mm_srai_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_srai_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_srai_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psrad $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srai_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psrad $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srai_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrad $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srai_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %arg0, i32 1)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -3049,15 +3824,15 @@ define <2 x i64> @test_mm_srai_epi32(<2
 declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_srl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_srl_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psrlw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srl_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psrlw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srl_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srl_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -3067,15 +3842,15 @@ define <2 x i64> @test_mm_srl_epi16(<2 x
 declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_srl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_srl_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psrld %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srl_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psrld %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srl_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrld %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srl_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -3085,30 +3860,30 @@ define <2 x i64> @test_mm_srl_epi32(<2 x
 declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_srl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_srl_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    psrlq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srl_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    psrlq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srl_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srl_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_srli_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_srli_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psrlw $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srli_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psrlw $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srli_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srli_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %arg0, i32 1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -3117,15 +3892,15 @@ define <2 x i64> @test_mm_srli_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_srli_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_srli_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psrld $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srli_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psrld $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srli_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrld $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srli_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %arg0, i32 1)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -3134,30 +3909,30 @@ define <2 x i64> @test_mm_srli_epi32(<2
 declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_srli_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_srli_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    psrlq $1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srli_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    psrlq $1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srli_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srli_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
-; X32-LABEL: test_mm_srli_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_srli_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_srli_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_srli_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
   %bc = bitcast <16 x i8> %res to <2 x i64>
@@ -3165,34 +3940,58 @@ define <2 x i64> @test_mm_srli_si128(<2
 }
 
 define void @test_mm_store_pd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_store_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movaps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_store_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_store_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_store_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_store_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_store_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   store <2 x double> %a1, <2 x double>* %arg0, align 16
   ret void
 }
 
 define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_store_pd1:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    movaps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_store_pd1:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_store_pd1:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_store_pd1:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_store_pd1:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_store_pd1:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-AVX-NEXT:    vmovapd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double * %a0 to <2 x double>*
   %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
   store <2 x double> %shuf, <2 x double>* %arg0, align 16
@@ -3200,49 +3999,84 @@ define void @test_mm_store_pd1(double *%
 }
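 ; NOTE: for the store-a-splat intrinsics (_mm_store_pd1 above and _mm_store1_pd
 ; below) the AVX lowering broadcasts element 0 with vmovddup instead of the
 ; SSE movlhps, then performs the same aligned 16-byte store.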
 
 define void @test_mm_store_sd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_store_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsd %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_store_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_store_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movsd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_store_sd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_store_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movsd %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_store_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %ext = extractelement <2 x double> %a1, i32 0
   store double %ext, double* %a0, align 1
   ret void
 }
 
 define void @test_mm_store_si128(<2 x i64> *%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_store_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movaps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_store_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_store_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_store_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_store_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_store_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 16
   ret void
 }
 
 define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_store1_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    movaps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_store1_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_store1_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_store1_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_store1_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_store1_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-AVX-NEXT:    vmovapd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double * %a0 to <2 x double>*
   %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
   store <2 x double> %shuf, <2 x double>* %arg0, align 16
@@ -3250,35 +4084,60 @@ define void @test_mm_store1_pd(double *%
 }
 
 define void @test_mm_storeh_sd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_storeh_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X32-NEXT:    movsd %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storeh_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X64-NEXT:    movsd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storeh_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    movsd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storeh_sd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storeh_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    movsd %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storeh_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %ext = extractelement <2 x double> %a1, i32 1
   store double %ext, double* %a0, align 8
   ret void
 }
 
 define void @test_mm_storel_epi64(<2 x i64> *%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_storel_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movlps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storel_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %xmm0, %rax
-; X64-NEXT:    movq %rax, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storel_epi64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movlps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storel_epi64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storel_epi64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %xmm0, %rax
+; X64-SSE-NEXT:    movq %rax, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storel_epi64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %xmm0, %rax
+; X64-AVX-NEXT:    movq %rax, (%rdi)
+; X64-AVX-NEXT:    retq
   %ext = extractelement <2 x i64> %a1, i32 0
   %bc = bitcast <2 x i64> *%a0 to i64*
   store i64 %ext, i64* %bc, align 8
@@ -3286,34 +4145,58 @@ define void @test_mm_storel_epi64(<2 x i
 }
 
 define void @test_mm_storel_sd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_storel_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsd %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storel_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storel_sd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movsd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storel_sd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storel_sd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movsd %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storel_sd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %ext = extractelement <2 x double> %a1, i32 0
   store double %ext, double* %a0, align 8
   ret void
 }
 
 define void @test_mm_storer_pd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_storer_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; X32-NEXT:    movapd %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storer_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; X64-NEXT:    movapd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storer_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X86-SSE-NEXT:    movapd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storer_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storer_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-SSE-NEXT:    movapd %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storer_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-AVX-NEXT:    vmovapd %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> <i32 1, i32 0>
   store <2 x double> %shuf, <2 x double>* %arg0, align 16
@@ -3321,59 +4204,92 @@ define void @test_mm_storer_pd(double *%
 }
 
 define void @test_mm_storeu_pd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_storeu_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movups %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storeu_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movups %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storeu_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movups %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storeu_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storeu_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movups %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storeu_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   store <2 x double> %a1, <2 x double>* %arg0, align 1
   ret void
 }
 
 define void @test_mm_storeu_si128(<2 x i64> *%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_storeu_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movups %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_storeu_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movups %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_storeu_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movups %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_storeu_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_storeu_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movups %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_storeu_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 1
   ret void
 }
 
 define void @test_mm_stream_pd(double *%a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_stream_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_stream_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movntps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_stream_pd:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movntps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_stream_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovntps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_stream_pd:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movntps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_stream_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovntps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
   store <2 x double> %a1, <2 x double>* %arg0, align 16, !nontemporal !0
   ret void
 }
 
 define void @test_mm_stream_si32(i32 *%a0, i32 %a1) {
-; X32-LABEL: test_mm_stream_si32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movntil %eax, (%ecx)
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_stream_si32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movntil %eax, (%ecx)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_si32:
 ; X64:       # %bb.0:
@@ -3384,30 +4300,41 @@ define void @test_mm_stream_si32(i32 *%a
 }
 
 define void @test_mm_stream_si128(<2 x i64> *%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_stream_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntps %xmm0, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_stream_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movntps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_stream_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movntps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_stream_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovntps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_stream_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movntps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_stream_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovntps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 16, !nontemporal !0
   ret void
 }
 
 define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_sub_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    psubb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    psubb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = sub <16 x i8> %arg0, %arg1
@@ -3416,15 +4343,15 @@ define <2 x i64> @test_mm_sub_epi8(<2 x
 }
 
 define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_sub_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psubw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psubw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = sub <8 x i16> %arg0, %arg1
@@ -3433,15 +4360,15 @@ define <2 x i64> @test_mm_sub_epi16(<2 x
 }
 
 define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_sub_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psubd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psubd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = sub <4 x i32> %arg0, %arg1
@@ -3450,43 +4377,43 @@ define <2 x i64> @test_mm_sub_epi32(<2 x
 }
 
 define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_sub_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    psubq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    psubq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = sub <2 x i64> %a0, %a1
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_sub_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    subpd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    subpd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    subpd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = fsub <2 x double> %a0, %a1
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_sub_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    subsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sub_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    subsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_sub_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    subsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_sub_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %ext0 = extractelement <2 x double> %a0, i32 0
   %ext1 = extractelement <2 x double> %a1, i32 0
   %fsub = fsub double %ext0, %ext1
@@ -3495,15 +4422,15 @@ define <2 x double> @test_mm_sub_sd(<2 x
 }
 
 define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_subs_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    psubsb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_subs_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    psubsb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_subs_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_subs_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -3513,15 +4440,15 @@ define <2 x i64> @test_mm_subs_epi8(<2 x
 declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_subs_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    psubsw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_subs_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    psubsw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_subs_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_subs_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -3531,15 +4458,15 @@ define <2 x i64> @test_mm_subs_epi16(<2
 declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_subs_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    psubusb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_subs_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    psubusb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_subs_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_subs_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -3549,15 +4476,15 @@ define <2 x i64> @test_mm_subs_epu8(<2 x
 declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_subs_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    psubusw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_subs_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    psubusw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_subs_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_subs_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -3567,159 +4494,151 @@ define <2 x i64> @test_mm_subs_epu16(<2
 declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomieq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    ucomisd %xmm1, %xmm0
-; X32-NEXT:    setnp %al
-; X32-NEXT:    sete %cl
-; X32-NEXT:    andb %al, %cl
-; X32-NEXT:    movzbl %cl, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomieq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    ucomisd %xmm1, %xmm0
-; X64-NEXT:    setnp %al
-; X64-NEXT:    sete %cl
-; X64-NEXT:    andb %al, %cl
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomieq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setnp %al
+; SSE-NEXT:    sete %cl
+; SSE-NEXT:    andb %al, %cl
+; SSE-NEXT:    movzbl %cl, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomieq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setnp %al
+; AVX-NEXT:    sete %cl
+; AVX-NEXT:    andb %al, %cl
+; AVX-NEXT:    movzbl %cl, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_ucomige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomige_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ucomisd %xmm1, %xmm0
-; X32-NEXT:    setae %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomige_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ucomisd %xmm1, %xmm0
-; X64-NEXT:    setae %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomige_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setae %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomige_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomigt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ucomisd %xmm1, %xmm0
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomigt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ucomisd %xmm1, %xmm0
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomigt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomigt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_ucomile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomile_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ucomisd %xmm0, %xmm1
-; X32-NEXT:    setae %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomile_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ucomisd %xmm0, %xmm1
-; X64-NEXT:    setae %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomile_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ucomisd %xmm0, %xmm1
+; SSE-NEXT:    setae %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomile_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vucomisd %xmm0, %xmm1
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomilt_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ucomisd %xmm0, %xmm1
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomilt_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ucomisd %xmm0, %xmm1
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomilt_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ucomisd %xmm0, %xmm1
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomilt_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vucomisd %xmm0, %xmm1
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_ucomineq_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    ucomisd %xmm1, %xmm0
-; X32-NEXT:    setp %al
-; X32-NEXT:    setne %cl
-; X32-NEXT:    orb %al, %cl
-; X32-NEXT:    movzbl %cl, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ucomineq_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    ucomisd %xmm1, %xmm0
-; X64-NEXT:    setp %al
-; X64-NEXT:    setne %cl
-; X64-NEXT:    orb %al, %cl
-; X64-NEXT:    movzbl %cl, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ucomineq_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    movzbl %cl, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ucomineq_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    movzbl %cl, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define <2 x double> @test_mm_undefined_pd() {
-; X32-LABEL: test_mm_undefined_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_undefined_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_undefined_pd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   ret <2 x double> undef
 }
 
 define <2 x i64> @test_mm_undefined_si128() {
-; X32-LABEL: test_mm_undefined_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_undefined_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_undefined_si128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   ret <2 x i64> undef
 }
 
 define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpackhi_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpackhi_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpackhi_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpackhi_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -3728,15 +4647,15 @@ define <2 x i64> @test_mm_unpackhi_epi8(
 }
 
 define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpackhi_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpackhi_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpackhi_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpackhi_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -3745,15 +4664,15 @@ define <2 x i64> @test_mm_unpackhi_epi16
 }
 
 define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpackhi_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpackhi_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpackhi_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpackhi_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = shufflevector <4 x i32> %arg0,<4 x i32> %arg1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -3762,43 +4681,43 @@ define <2 x i64> @test_mm_unpackhi_epi32
 }
 
 define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpackhi_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpackhi_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpackhi_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpackhi_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_unpackhi_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpackhi_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpackhi_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpackhi_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
   ret <2 x double> %res
 }
 
 define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpacklo_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpacklo_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpacklo_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpacklo_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -3807,15 +4726,15 @@ define <2 x i64> @test_mm_unpacklo_epi8(
 }
 
 define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpacklo_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpacklo_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpacklo_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpacklo_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -3824,15 +4743,15 @@ define <2 x i64> @test_mm_unpacklo_epi16
 }
 
 define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpacklo_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpacklo_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpacklo_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpacklo_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = shufflevector <4 x i32> %arg0,<4 x i32> %arg1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -3841,43 +4760,43 @@ define <2 x i64> @test_mm_unpacklo_epi32
 }
 
 define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_unpacklo_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpacklo_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpacklo_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpacklo_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
   ret <2 x i64> %res
 }
 
 define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_unpacklo_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_unpacklo_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_unpacklo_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_unpacklo_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
-; X32-LABEL: test_mm_xor_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_xor_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_xor_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_xor_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
   %arg1 = bitcast <2 x double> %a1 to <4 x i32>
   %res = xor <4 x i32> %arg0, %arg1
@@ -3886,15 +4805,15 @@ define <2 x double> @test_mm_xor_pd(<2 x
 }
 
 define <2 x i64> @test_mm_xor_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X32-LABEL: test_mm_xor_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_xor_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_xor_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_xor_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = xor <2 x i64> %a0, %a1
   ret <2 x i64> %res
 }
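
As an aside on the RUN-line scheme used above: update_llc_test_checks.py emits each assertion block under the most specific FileCheck prefix shared by every RUN line producing that exact codegen, which is why the 32-bit and 64-bit runs collapse into single SSE/AVX/CHECK blocks (with ret{{[l|q]}} matching both retl and retq) while code that differs per target keeps its X86-*/X64-* prefixes. A minimal sketch of the pattern, as a hypothetical standalone test rather than anything from this commit:

; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX

; Both triples produce the same body for each feature set, so the updater
; merges the checks under the shared SSE/AVX prefixes and regexes the ret.
define <2 x i64> @example_xor(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: example_xor:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: example_xor:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = xor <2 x i64> %a0, %a1
  ret <2 x i64> %res
}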

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll Sat Jun  2 12:43:14 2018
@@ -1,11 +1,29 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psll_dq_bs:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_psll_dq_bs:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pslldq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf8,0x07]
+; SSE-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psll_dq_bs:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpslldq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf8,0x07]
+; AVX1-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psll_dq_bs:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpslldq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x07]
+; AVX512-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -13,20 +31,46 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 
 define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psrl_dq_bs:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_psrl_dq_bs:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    psrldq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd8,0x07]
+; SSE-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psrl_dq_bs:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrldq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd8,0x07]
+; AVX1-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrl_dq_bs:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrldq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x07]
+; AVX512-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_psll_dq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pslldq $1, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf8,0x01]
+; SSE-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psll_dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpslldq $1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf8,0x01]
+; AVX1-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psll_dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpslldq $1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x01]
+; AVX512-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -34,10 +78,23 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_psrl_dq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    psrldq $1, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd8,0x01]
+; SSE-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psrl_dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrldq $1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd8,0x01]
+; AVX1-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrl_dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrldq $1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x01]
+; AVX512-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -45,10 +102,20 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 
 define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    cvtdq2pd %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_cvtdq2pd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0xe6,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_cvtdq2pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtdq2pd %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xe6,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtdq2pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -56,10 +123,20 @@ declare <2 x double> @llvm.x86.sse2.cvtd
 
 
 define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    cvtps2pd %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_cvtps2pd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0 ## encoding: [0x0f,0x5a,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_cvtps2pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtps2pd %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5a,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtps2pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtps2pd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -67,11 +144,38 @@ declare <2 x double> @llvm.x86.sse2.cvtp
 
 
 define void @test_x86_sse2_storel_dq(i8* %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_sse2_storel_dq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movlps %xmm0, (%eax)
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_storel_dq:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movlps %xmm0, (%eax) ## encoding: [0x0f,0x13,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_storel_dq:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovlps %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x13,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_storel_dq:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovlps %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_storel_dq:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movlps %xmm0, (%rdi) ## encoding: [0x0f,0x13,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_storel_dq:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovlps %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x13,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_storel_dq:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovlps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   call void @llvm.x86.sse2.storel.dq(i8* %a0, <4 x i32> %a1)
   ret void
 }
@@ -80,13 +184,50 @@ declare void @llvm.x86.sse2.storel.dq(i8
 
 define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
   ; add operation forces the execution domain.
-; CHECK-LABEL: test_x86_sse2_storeu_dq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    psubb %xmm1, %xmm0
-; CHECK-NEXT:    movdqu %xmm0, (%eax)
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_storeu_dq:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    pcmpeqd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x76,0xc9]
+; X86-SSE-NEXT:    psubb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf8,0xc1]
+; X86-SSE-NEXT:    movdqu %xmm0, (%eax) ## encoding: [0xf3,0x0f,0x7f,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_storeu_dq:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf8,0xc1]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, (%eax) ## encoding: [0xc5,0xfa,0x7f,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_storeu_dq:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; X86-AVX512-NEXT:    vmovdqu %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_storeu_dq:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x76,0xc9]
+; X64-SSE-NEXT:    psubb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf8,0xc1]
+; X64-SSE-NEXT:    movdqu %xmm0, (%rdi) ## encoding: [0xf3,0x0f,0x7f,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_storeu_dq:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf8,0xc1]
+; X64-AVX1-NEXT:    vmovdqu %xmm0, (%rdi) ## encoding: [0xc5,0xfa,0x7f,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_storeu_dq:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; X64-AVX512-NEXT:    vmovdqu %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
   ret void
@@ -96,14 +237,70 @@ declare void @llvm.x86.sse2.storeu.dq(i8
 
 define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
   ; fadd operation forces the execution domain.
-; CHECK-LABEL: test_x86_sse2_storeu_pd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    xorpd %xmm1, %xmm1
-; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; CHECK-NEXT:    addpd %xmm0, %xmm1
-; CHECK-NEXT:    movupd %xmm1, (%eax)
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_storeu_pd:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    xorpd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x57,0xc9]
+; X86-SSE-NEXT:    movhpd LCPI8_0, %xmm1 ## encoding: [0x66,0x0f,0x16,0x0d,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 4, value: LCPI8_0, kind: FK_Data_4
+; X86-SSE-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; X86-SSE-NEXT:    addpd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x58,0xc8]
+; X86-SSE-NEXT:    movupd %xmm1, (%eax) ## encoding: [0x66,0x0f,0x11,0x08]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_storeu_pd:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX1-NEXT:    vmovhpd LCPI8_0, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI8_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; X86-AVX1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x58,0xc1]
+; X86-AVX1-NEXT:    vmovupd %xmm0, (%eax) ## encoding: [0xc5,0xf9,0x11,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_storeu_pd:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovsd LCPI8_0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0d,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI8_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero
+; X86-AVX512-NEXT:    vpslldq $8, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x73,0xf9,0x08]
+; X86-AVX512-NEXT:    ## xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X86-AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
+; X86-AVX512-NEXT:    vmovupd %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_storeu_pd:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    xorpd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x57,0xc9]
+; X64-SSE-NEXT:    movhpd {{.*}}(%rip), %xmm1 ## encoding: [0x66,0x0f,0x16,0x0d,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 4, value: LCPI8_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    addpd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x58,0xc8]
+; X64-SSE-NEXT:    movupd %xmm1, (%rdi) ## encoding: [0x66,0x0f,0x11,0x0f]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_storeu_pd:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX1-NEXT:    vmovhpd {{.*}}(%rip), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI8_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; X64-AVX1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x58,0xc1]
+; X64-AVX1-NEXT:    vmovupd %xmm0, (%rdi) ## encoding: [0xc5,0xf9,0x11,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_storeu_pd:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovsd {{.*}}(%rip), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0d,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI8_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    ## xmm1 = mem[0],zero
+; X64-AVX512-NEXT:    vpslldq $8, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x73,0xf9,0x08]
+; X64-AVX512-NEXT:    ## xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
+; X64-AVX512-NEXT:    vmovupd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a2 = fadd <2 x double> %a1, <double 0x0, double 0x4200000000000000>
   call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
   ret void
@@ -111,10 +308,23 @@ define void @test_x86_sse2_storeu_pd(i8*
 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
 
 define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
-; CHECK-LABEL: test_x86_sse2_pshuf_d:
-; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_pshuf_d:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    pshufd $27, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc0,0x1b]
+; SSE-NEXT:    ## xmm0 = xmm0[3,2,1,0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_pshuf_d:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vpermilps $27, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x1b]
+; AVX1-NEXT:    ## xmm0 = xmm0[3,2,1,0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pshuf_d:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vpermilps $27, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x1b]
+; AVX512-NEXT:    ## xmm0 = xmm0[3,2,1,0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %res = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27) nounwind readnone
   ret <4 x i32> %res
@@ -122,10 +332,23 @@ entry:
 declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
-; CHECK-LABEL: test_x86_sse2_pshufl_w:
-; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_pshufl_w:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    pshuflw $27, %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x70,0xc0,0x1b]
+; SSE-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_pshufl_w:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vpshuflw $27, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x70,0xc0,0x1b]
+; AVX1-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pshufl_w:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vpshuflw $27, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc0,0x1b]
+; AVX512-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %res = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27) nounwind readnone
   ret <8 x i16> %res
@@ -133,10 +356,23 @@ entry:
 declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
-; CHECK-LABEL: test_x86_sse2_pshufh_w:
-; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_pshufh_w:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    pshufhw $27, %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x70,0xc0,0x1b]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_pshufh_w:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vpshufhw $27, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x70,0xc0,0x1b]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pshufh_w:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vpshufhw $27, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xc0,0x1b]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %res = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27) nounwind readnone
   ret <8 x i16> %res
@@ -144,50 +380,100 @@ entry:
 declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
 
 define <16 x i8> @max_epu8(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: max_epu8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxub %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epu8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xde,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: max_epu8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epu8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
 declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <16 x i8> @min_epu8(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: min_epu8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminub %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epu8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xda,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: min_epu8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epu8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
 declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @max_epi16(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: max_epi16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxsw %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epi16:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: max_epi16:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epi16:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @min_epi16(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: min_epi16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminsw %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epi16:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xea,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: min_epi16:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epi16:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x double> @test_x86_sse2_add_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_add_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    addsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_add_sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    addsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x58,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_add_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x58,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_add_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x58,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -195,10 +481,20 @@ declare <2 x double> @llvm.x86.sse2.add.
 
 
 define <2 x double> @test_x86_sse2_sub_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_sub_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    subsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_sub_sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    subsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5c,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_sub_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5c,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_sub_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5c,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -206,10 +502,20 @@ declare <2 x double> @llvm.x86.sse2.sub.
 
 
 define <2 x double> @test_x86_sse2_mul_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_mul_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    mulsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_mul_sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    mulsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x59,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_mul_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x59,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_mul_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x59,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -217,30 +523,60 @@ declare <2 x double> @llvm.x86.sse2.mul.
 
 
 define <2 x double> @test_x86_sse2_div_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_div_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    divsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_div_sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    divsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5e,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_div_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5e,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_div_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5e,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define <16 x i8> @mm_avg_epu8(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: mm_avg_epu8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pavgb %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: mm_avg_epu8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pavgb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe0,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: mm_avg_epu8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe0,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: mm_avg_epu8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
 declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @mm_avg_epu16(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: mm_avg_epu16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pavgw %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: mm_avg_epu16:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pavgw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe3,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: mm_avg_epu16:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe3,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: mm_avg_epu16:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -248,10 +584,20 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(
 
 
 define <2 x i64> @test_x86_sse2_pmulu_dq(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_sse2_pmulu_dq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmuludq %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_pmulu_dq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmuludq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf4,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_pmulu_dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf4,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmulu_dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -259,10 +605,35 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.d
 
 
 define <2 x double> @test_x86_sse2_cvtsi2sd(<2 x double> %a0, i32 %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtsi2sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_cvtsi2sd:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_cvtsi2sd:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_cvtsi2sd:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtsi2sd:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtsi2sdl %edi, %xmm0 ## encoding: [0xf2,0x0f,0x2a,0xc7]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_cvtsi2sd:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_cvtsi2sd:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
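
(Note why this test needs fully split X86-*/X64-* prefixes while most of its
neighbours share SSE/AVX1/AVX512 blocks: the i32 argument reaches the function
on the stack on 32-bit targets but in %edi on 64-bit, so the two triples can
never produce a common body. A minimal sketch of the divergence, reusing the
intrinsic above under an invented name:

; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
define <2 x double> @abi_demo(<2 x double> %a0, i32 %a1) nounwind {
; CHECK-LABEL: abi_demo:
; X86:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
; X64:    cvtsi2sdl %edi, %xmm0
  %res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
)
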
@@ -270,10 +641,20 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 
 define <2 x double> @test_x86_sse2_cvtss2sd(<2 x double> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtss2sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    cvtss2sd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_cvtss2sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    cvtss2sd %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5a,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_cvtss2sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtss2sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5a,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -281,13 +662,62 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 
 define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
-; CHECK-LABEL: test_x86_sse2_cvtss2sd_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT:    cvtss2sd %xmm1, %xmm1
-; CHECK-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movss (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x08]
+; X86-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
+; X86-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
+; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovss (%eax), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x08]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
+; X86-AVX1-NEXT:    vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
+; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
+; X86-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movss (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x0f]
+; X64-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT:    cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
+; X64-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
+; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovss (%rdi), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x0f]
+; X64-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
+; X64-AVX1-NEXT:    vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
+; X64-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
+; X64-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <4 x float>, <4 x float>* %p1
   %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -295,12 +725,50 @@ define <2 x double> @test_x86_sse2_cvtss
 
 
 define <2 x double> @test_x86_sse2_cvtss2sd_load_optsize(<2 x double> %a0, <4 x float>* %p1) optsize {
-; CHECK-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    cvtss2sd (%eax), %xmm1
-; CHECK-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvtss2sd (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x5a,0x08]
+; X86-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
+; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vcvtss2sd (%eax), %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0x08]
+; X86-AVX1-NEXT:    vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vcvtss2sd (%eax), %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0x08]
+; X86-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtss2sd (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x5a,0x0f]
+; X64-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
+; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vcvtss2sd (%rdi), %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0x0f]
+; X64-AVX1-NEXT:    vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vcvtss2sd (%rdi), %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0x0f]
+; X64-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <4 x float>, <4 x float>* %p1
   %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
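
(The _load/_load_optsize pairing above targets the partial-register-update
heuristic: (v)cvtss2sd only writes the low element of its destination, so by
default the backend loads through (v)movss first to break the false dependency
on the rest of the register, and only folds the load into the conversion when
the function is marked optsize, where the byte savings win. A compact sketch
of the contrast, with invented function names:

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s
define <2 x double> @fold_no(<2 x double> %a0, <4 x float>* %p1) nounwind {
; CHECK-LABEL: fold_no:
; CHECK:    movss (%rdi), %xmm1
; CHECK:    cvtss2sd %xmm1, %xmm1
  %a1 = load <4 x float>, <4 x float>* %p1
  %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1)
  ret <2 x double> %res
}
define <2 x double> @fold_yes(<2 x double> %a0, <4 x float>* %p1) nounwind optsize {
; CHECK-LABEL: fold_yes:
; CHECK:    cvtss2sd (%rdi), %xmm1
  %a1 = load <4 x float>, <4 x float>* %p1
  %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
)
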
@@ -308,10 +776,20 @@ define <2 x double> @test_x86_sse2_cvtss
 
 
 define <4 x float> @test_x86_sse2_cvtdq2ps(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtdq2ps:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    cvtdq2ps %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse2_cvtdq2ps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0 ## encoding: [0x0f,0x5b,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_cvtdq2ps:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5b,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtdq2ps:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %a0) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll Sat Jun  2 12:43:14 2018
@@ -1,18 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_cmp_pd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xc2,0xc1,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse2_cmp_pd:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vcmpordpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse2_cmp_pd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vcmpordpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
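
(The RUN matrix above feeds one test body through six configurations, and the
layered --check-prefixes let update_llc_test_checks.py place each block under
the widest prefix whose expected output is identical across all runs carrying
it — a sketch of the hierarchy, not the script's exact algorithm:

; CHECK                   - shared by all six runs
; X86 / X64               - split only by triple
; SSE / AVX               - split only by ISA family
; AVX1 / AVX512           - split within the AVX runs
; X86-SSE ... X64-AVX512  - fully specific, e.g. for folded loads
;
; so test_x86_sse2_cmp_pd above needs only SSE and AVX blocks, while the
; load tests later in the file fall all the way down to X86-*/X64-*.
)
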
@@ -23,12 +26,12 @@ define <2 x double> @test_x86_sse2_cmp_s
 ; SSE-LABEL: test_x86_sse2_cmp_sd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0xc2,0xc1,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse2_cmp_sd:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse2_cmp_sd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -43,25 +46,25 @@ define i32 @test_x86_sse2_comieq_sd(<2 x
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
 ; SSE-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
 ; SSE-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comieq_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX2-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX2-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comieq_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
-; SKX-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
-; SKX-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comieq_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX1-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
+; AVX1-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
+; AVX1-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
+; AVX1-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comieq_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX512-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
+; AVX512-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
+; AVX512-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
+; AVX512-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -74,21 +77,21 @@ define i32 @test_x86_sse2_comige_sd(<2 x
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comige_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comige_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comige_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX1-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comige_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX512-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -101,21 +104,21 @@ define i32 @test_x86_sse2_comigt_sd(<2 x
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comigt_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comigt_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comigt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX1-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comigt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX512-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -128,21 +131,21 @@ define i32 @test_x86_sse2_comile_sd(<2 x
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comile_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comile_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
-; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comile_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
+; AVX1-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comile_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
+; AVX512-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -155,21 +158,21 @@ define i32 @test_x86_sse2_comilt_sd(<2 x
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comilt_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comilt_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
-; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comilt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
+; AVX1-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comilt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
+; AVX512-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -184,25 +187,25 @@ define i32 @test_x86_sse2_comineq_sd(<2
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
 ; SSE-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
 ; SSE-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_comineq_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX2-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX2-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_comineq_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
-; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
-; SKX-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
-; SKX-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_comineq_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX1-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
+; AVX1-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
+; AVX1-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
+; AVX1-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_comineq_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
+; AVX512-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
+; AVX512-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
+; AVX512-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
+; AVX512-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -213,17 +216,17 @@ define <4 x i32> @test_x86_sse2_cvtpd2dq
 ; SSE-LABEL: test_x86_sse2_cvtpd2dq:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtpd2dq:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtpd2dq:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtpd2dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtpd2dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -234,17 +237,17 @@ define <2 x i64> @test_mm_cvtpd_epi32_ze
 ; SSE-LABEL: test_mm_cvtpd_epi32_zext:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_mm_cvtpd_epi32_zext:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_mm_cvtpd_epi32_zext:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_mm_cvtpd_epi32_zext:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_mm_cvtpd_epi32_zext:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -253,23 +256,38 @@ define <2 x i64> @test_mm_cvtpd_epi32_ze
 
 
 define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind {
-; SSE-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    cvtpd2dq (%eax), %xmm0 ## encoding: [0xf2,0x0f,0xe6,0x00]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_mm_cvtpd_epi32_zext_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xe6,0x00]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0x00]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvtpd2dq (%eax), %xmm0 ## encoding: [0xf2,0x0f,0xe6,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xe6,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtpd2dq (%rdi), %xmm0 ## encoding: [0xf2,0x0f,0xe6,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vcvtpd2dqx (%rdi), %xmm0 ## encoding: [0xc5,0xfb,0xe6,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_mm_cvtpd_epi32_zext_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vcvtpd2dqx (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a0 = load <2 x double>, <2 x double>* %p0
   %cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
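
(One wrinkle in the folded AVX forms above: the narrowing conversions print
with an explicit size suffix, vcvtpd2dqx, because once the source is a memory
operand AT&T syntax can no longer infer whether it is 128-bit or 256-bit —
both produce an xmm result — so the assembler needs x vs y spelled out. A
small sketch, with an invented name:

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
define <4 x i32> @suffix_demo(<2 x double>* %p0) nounwind {
; CHECK-LABEL: suffix_demo:
; CHECK:    vcvtpd2dqx (%rdi), %xmm0
  %a0 = load <2 x double>, <2 x double>* %p0
  %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
)
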
@@ -282,17 +300,17 @@ define <4 x float> @test_x86_sse2_cvtpd2
 ; SSE-LABEL: test_x86_sse2_cvtpd2ps:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtpd2ps:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtpd2ps:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtpd2ps:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtpd2ps:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -302,40 +320,55 @@ define <4 x float> @test_x86_sse2_cvtpd2
 ; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtpd2ps_zext:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtpd2ps_zext:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
   %res = shufflevector <4 x float> %cvt, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x float> %res
 }
 
 define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind {
-; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    cvtpd2ps (%eax), %xmm0 ## encoding: [0x66,0x0f,0x5a,0x00]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vcvtpd2psx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x5a,0x00]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vcvtpd2psx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0x00]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvtpd2ps (%eax), %xmm0 ## encoding: [0x66,0x0f,0x5a,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vcvtpd2psx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x5a,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vcvtpd2psx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtpd2ps (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x5a,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vcvtpd2psx (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x5a,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vcvtpd2psx (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a0 = load <2 x double>, <2 x double>* %p0
   %cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
   %res = shufflevector <4 x float> %cvt, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -346,17 +379,17 @@ define <4 x i32> @test_x86_sse2_cvtps2dq
 ; SSE-LABEL: test_x86_sse2_cvtps2dq:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtps2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5b,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtps2dq:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5b,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtps2dq:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5b,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtps2dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5b,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtps2dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5b,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -367,17 +400,17 @@ define i32 @test_x86_sse2_cvtsd2si(<2 x
 ; SSE-LABEL: test_x86_sse2_cvtsd2si:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2d,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtsd2si:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2d,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvtsd2si:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtsd2si:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2d,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvtsd2si:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -388,12 +421,12 @@ define <4 x float> @test_x86_sse2_cvtsd2
 ; SSE-LABEL: test_x86_sse2_cvtsd2ss:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2ss %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5a,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse2_cvtsd2ss:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0xc1]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse2_cvtsd2ss:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -401,17 +434,27 @@ declare <4 x float> @llvm.x86.sse2.cvtsd
 
 
 define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %p1) {
-; SSE-LABEL: test_x86_sse2_cvtsd2ss_load:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; VCHECK-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_cvtsd2ss_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse2_cvtsd2ss_load:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtsd2ss_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtsd2ss (%rdi), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse2_cvtsd2ss_load:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vcvtsd2ss (%rdi), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <2 x double>, <2 x double>* %p1
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -419,17 +462,27 @@ define <4 x float> @test_x86_sse2_cvtsd2
 
 
 define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x double>* %p1) optsize {
-; SSE-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; VCHECK-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvtsd2ss (%rdi), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vcvtsd2ss (%rdi), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <2 x double>, <2 x double>* %p1
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -440,17 +493,17 @@ define <4 x i32> @test_x86_sse2_cvttpd2d
 ; SSE-LABEL: test_x86_sse2_cvttpd2dq:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvttpd2dq:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvttpd2dq:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvttpd2dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvttpd2dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -461,17 +514,17 @@ define <2 x i64> @test_mm_cvttpd_epi32_z
 ; SSE-LABEL: test_mm_cvttpd_epi32_zext:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_mm_cvttpd_epi32_zext:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_mm_cvttpd_epi32_zext:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_mm_cvttpd_epi32_zext:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_mm_cvttpd_epi32_zext:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -480,23 +533,38 @@ define <2 x i64> @test_mm_cvttpd_epi32_z
 
 
 define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind {
-; SSE-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    cvttpd2dq (%eax), %xmm0 ## encoding: [0x66,0x0f,0xe6,0x00]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_mm_cvttpd_epi32_zext_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0xe6,0x00]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0x00]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    cvttpd2dq (%eax), %xmm0 ## encoding: [0x66,0x0f,0xe6,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0xe6,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    cvttpd2dq (%rdi), %xmm0 ## encoding: [0x66,0x0f,0xe6,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vcvttpd2dqx (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0xe6,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_mm_cvttpd_epi32_zext_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vcvttpd2dqx (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a0 = load <2 x double>, <2 x double>* %p0
   %cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -509,17 +577,17 @@ define <4 x i32> @test_x86_sse2_cvttps2d
 ; SSE-LABEL: test_x86_sse2_cvttps2dq:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttps2dq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x5b,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvttps2dq:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvttps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5b,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvttps2dq:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvttps2dq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvttps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5b,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvttps2dq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -530,17 +598,17 @@ define i32 @test_x86_sse2_cvttsd2si(<2 x
 ; SSE-LABEL: test_x86_sse2_cvttsd2si:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2c,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvttsd2si:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvttsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2c,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_cvttsd2si:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvttsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvttsd2si:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvttsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2c,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_cvttsd2si:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvttsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -551,17 +619,17 @@ define <2 x double> @test_x86_sse2_max_p
 ; SSE-LABEL: test_x86_sse2_max_pd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5f,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_max_pd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5f,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_max_pd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_max_pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5f,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_max_pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -572,17 +640,17 @@ define <2 x double> @test_x86_sse2_max_s
 ; SSE-LABEL: test_x86_sse2_max_sd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5f,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_max_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_max_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_max_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_max_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -593,17 +661,17 @@ define <2 x double> @test_x86_sse2_min_p
 ; SSE-LABEL: test_x86_sse2_min_pd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    minpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5d,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_min_pd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5d,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_min_pd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_min_pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5d,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_min_pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -614,17 +682,17 @@ define <2 x double> @test_x86_sse2_min_s
 ; SSE-LABEL: test_x86_sse2_min_sd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    minsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5d,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_min_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_min_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_min_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_min_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -635,12 +703,12 @@ define i32 @test_x86_sse2_movmsk_pd(<2 x
 ; SSE-LABEL: test_x86_sse2_movmsk_pd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    movmskpd %xmm0, %eax ## encoding: [0x66,0x0f,0x50,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse2_movmsk_pd:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vmovmskpd %xmm0, %eax ## encoding: [0xc5,0xf9,0x50,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse2_movmsk_pd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vmovmskpd %xmm0, %eax ## encoding: [0xc5,0xf9,0x50,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
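 ; movmskpd is VEX-only - AVX512 extracts compare masks through k-registers
 ; rather than a GPR movmsk, so there is no EVEX form to compress. Every AVX
 ; target therefore emits the same bytes, and one combined AVX prefix suffices
 ; here, unlike the AVX1/AVX512 pairs used elsewhere in this file.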
@@ -651,17 +719,17 @@ define <8 x i16> @test_x86_sse2_packssdw
 ; SSE-LABEL: test_x86_sse2_packssdw_128:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    packssdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6b,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_packssdw_128:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x6b,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packssdw_128:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_packssdw_128:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x6b,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_packssdw_128:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
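 ; The AVX1 and AVX512 byte sequences above are intentionally identical: when a
 ; 128-bit AVX512VL instruction uses no EVEX-only feature (masking, registers
 ; xmm16-xmm31, embedded broadcast), the EVEX-to-VEX compression pass re-encodes
 ; it with the shorter VEX prefix, which is what the "EVEX TO VEX Compression"
 ; comments record.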
@@ -669,26 +737,47 @@ declare <8 x i16> @llvm.x86.sse2.packssd
 
 
 define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
-; SSE-LABEL: test_x86_sse2_packssdw_128_fold:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
-; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
-; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI30_0, kind: FK_Data_4
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse2_packssdw_128_fold:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
-; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI30_0, kind: FK_Data_4
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packssdw_128_fold:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmovaps LCPI30_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
-; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI30_0, kind: FK_Data_4
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_packssdw_128_fold:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X86-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI30_0, kind: FK_Data_4
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_packssdw_128_fold:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X86-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI30_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_packssdw_128_fold:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovaps LCPI30_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X86-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI30_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_packssdw_128_fold:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X64-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI30_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_packssdw_128_fold:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X64-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI30_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_packssdw_128_fold:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X64-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI30_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
   ret <8 x i16> %res
 }
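 ; The fold tests cannot share check prefixes across triples: 32-bit code loads
 ; the constant pool through an absolute 4-byte fixup (FK_Data_4 against
 ; LCPI30_0), while 64-bit code addresses it RIP-relatively (reloc_riprel_4byte
 ; against LCPI30_0-4), so the fixup lines differ even where the opcode bytes
 ; match.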
@@ -698,17 +787,17 @@ define <16 x i8> @test_x86_sse2_packsswb
 ; SSE-LABEL: test_x86_sse2_packsswb_128:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    packsswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x63,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_packsswb_128:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packsswb_128:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_packsswb_128:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_packsswb_128:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -716,26 +805,47 @@ declare <16 x i8> @llvm.x86.sse2.packssw
 
 
 define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
-; SSE-LABEL: test_x86_sse2_packsswb_128_fold:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
-; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI32_0, kind: FK_Data_4
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse2_packsswb_128_fold:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI32_0, kind: FK_Data_4
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packsswb_128_fold:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmovaps LCPI32_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI32_0, kind: FK_Data_4
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_packsswb_128_fold:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI32_0, kind: FK_Data_4
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_packsswb_128_fold:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI32_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_packsswb_128_fold:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovaps LCPI32_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI32_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_packsswb_128_fold:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X64-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI32_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_packsswb_128_fold:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X64-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI32_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_packsswb_128_fold:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X64-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI32_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
   ret <16 x i8> %res
 }
@@ -745,17 +855,17 @@ define <16 x i8> @test_x86_sse2_packuswb
 ; SSE-LABEL: test_x86_sse2_packuswb_128:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    packuswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x67,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_packuswb_128:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x67,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packuswb_128:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_packuswb_128:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x67,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_packuswb_128:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -763,26 +873,47 @@ declare <16 x i8> @llvm.x86.sse2.packusw
 
 
 define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
-; SSE-LABEL: test_x86_sse2_packuswb_128_fold:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
-; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI34_0, kind: FK_Data_4
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse2_packuswb_128_fold:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI34_0, kind: FK_Data_4
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_packuswb_128_fold:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmovaps LCPI34_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI34_0, kind: FK_Data_4
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_packuswb_128_fold:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI34_0, kind: FK_Data_4
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_packuswb_128_fold:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI34_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_packuswb_128_fold:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovaps LCPI34_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI34_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_packuswb_128_fold:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X64-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI34_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_packuswb_128_fold:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X64-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI34_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_packuswb_128_fold:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X64-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI34_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
   ret <16 x i8> %res
 }
@@ -792,17 +923,17 @@ define <16 x i8> @test_x86_sse2_padds_b(
 ; SSE-LABEL: test_x86_sse2_padds_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_padds_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_padds_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_padds_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_padds_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -813,17 +944,17 @@ define <8 x i16> @test_x86_sse2_padds_w(
 ; SSE-LABEL: test_x86_sse2_padds_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_padds_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_padds_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_padds_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_padds_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -834,17 +965,17 @@ define <16 x i8> @test_x86_sse2_paddus_b
 ; SSE-LABEL: test_x86_sse2_paddus_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_paddus_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_paddus_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_paddus_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_paddus_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -855,17 +986,17 @@ define <8 x i16> @test_x86_sse2_paddus_w
 ; SSE-LABEL: test_x86_sse2_paddus_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_paddus_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_paddus_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_paddus_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_paddus_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -876,17 +1007,17 @@ define <4 x i32> @test_x86_sse2_pmadd_wd
 ; SSE-LABEL: test_x86_sse2_pmadd_wd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaddwd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf5,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmadd_wd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf5,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmadd_wd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmadd_wd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf5,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmadd_wd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -897,17 +1028,17 @@ define <8 x i16> @test_x86_sse2_pmaxs_w(
 ; SSE-LABEL: test_x86_sse2_pmaxs_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmaxs_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmaxs_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmaxs_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmaxs_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -918,17 +1049,17 @@ define <16 x i8> @test_x86_sse2_pmaxu_b(
 ; SSE-LABEL: test_x86_sse2_pmaxu_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xde,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmaxu_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmaxu_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmaxu_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmaxu_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -939,17 +1070,17 @@ define <8 x i16> @test_x86_sse2_pmins_w(
 ; SSE-LABEL: test_x86_sse2_pmins_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pminsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xea,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmins_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmins_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmins_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmins_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -960,17 +1091,17 @@ define <16 x i8> @test_x86_sse2_pminu_b(
 ; SSE-LABEL: test_x86_sse2_pminu_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pminub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xda,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pminu_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pminu_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pminu_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pminu_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -981,12 +1112,12 @@ define i32 @test_x86_sse2_pmovmskb_128(<
 ; SSE-LABEL: test_x86_sse2_pmovmskb_128:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmovmskb %xmm0, %eax ## encoding: [0x66,0x0f,0xd7,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse2_pmovmskb_128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vpmovmskb %xmm0, %eax ## encoding: [0xc5,0xf9,0xd7,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse2_pmovmskb_128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovmskb %xmm0, %eax ## encoding: [0xc5,0xf9,0xd7,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -997,17 +1128,17 @@ define <8 x i16> @test_x86_sse2_pmulh_w(
 ; SSE-LABEL: test_x86_sse2_pmulh_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmulhw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe5,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmulh_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe5,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmulh_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmulh_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe5,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmulh_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1018,17 +1149,17 @@ define <8 x i16> @test_x86_sse2_pmulhu_w
 ; SSE-LABEL: test_x86_sse2_pmulhu_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmulhuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe4,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pmulhu_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe4,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pmulhu_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pmulhu_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe4,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pmulhu_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1039,17 +1170,17 @@ define <2 x i64> @test_x86_sse2_psad_bw(
 ; SSE-LABEL: test_x86_sse2_psad_bw:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psadbw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf6,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psad_bw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf6,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psad_bw:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psad_bw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf6,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psad_bw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1060,17 +1191,17 @@ define <4 x i32> @test_x86_sse2_psll_d(<
 ; SSE-LABEL: test_x86_sse2_psll_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pslld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf2,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psll_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf2,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psll_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psll_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf2,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psll_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1081,17 +1212,17 @@ define <2 x i64> @test_x86_sse2_psll_q(<
 ; SSE-LABEL: test_x86_sse2_psll_q:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf3,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psll_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf3,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psll_q:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psll_q:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf3,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psll_q:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1102,17 +1233,17 @@ define <8 x i16> @test_x86_sse2_psll_w(<
 ; SSE-LABEL: test_x86_sse2_psll_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf1,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psll_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf1,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psll_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psll_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf1,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psll_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1123,17 +1254,17 @@ define <4 x i32> @test_x86_sse2_pslli_d(
 ; SSE-LABEL: test_x86_sse2_pslli_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    pslld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xf0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pslli_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpslld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xf0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pslli_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpslld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pslli_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpslld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xf0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pslli_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpslld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
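 ; For the immediate shift forms the opcode byte is shared and the modrm /r
 ; field selects the operation: 0x72 /6 (modrm 0xf0) is pslld, /4 (0xe0) is
 ; psrad, and /2 (0xd0) is psrld, so the encodings in this group differ only
 ; in that one byte plus the imm8.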
@@ -1144,17 +1275,17 @@ define <2 x i64> @test_x86_sse2_pslli_q(
 ; SSE-LABEL: test_x86_sse2_pslli_q:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pslli_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pslli_q:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsllq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pslli_q:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsllq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pslli_q:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsllq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1165,17 +1296,17 @@ define <8 x i16> @test_x86_sse2_pslli_w(
 ; SSE-LABEL: test_x86_sse2_pslli_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xf0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_pslli_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_pslli_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_pslli_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_pslli_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1186,17 +1317,17 @@ define <4 x i32> @test_x86_sse2_psra_d(<
 ; SSE-LABEL: test_x86_sse2_psra_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrad %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe2,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psra_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe2,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psra_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psra_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe2,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psra_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1207,17 +1338,17 @@ define <8 x i16> @test_x86_sse2_psra_w(<
 ; SSE-LABEL: test_x86_sse2_psra_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psraw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe1,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psra_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe1,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psra_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psra_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe1,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psra_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1228,17 +1359,17 @@ define <4 x i32> @test_x86_sse2_psrai_d(
 ; SSE-LABEL: test_x86_sse2_psrai_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrad $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xe0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrai_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrad $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xe0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrai_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrad $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrai_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrad $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xe0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrai_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrad $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1249,17 +1380,17 @@ define <8 x i16> @test_x86_sse2_psrai_w(
 ; SSE-LABEL: test_x86_sse2_psrai_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psraw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xe0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrai_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsraw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrai_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsraw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrai_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsraw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrai_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsraw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1270,17 +1401,17 @@ define <4 x i32> @test_x86_sse2_psrl_d(<
 ; SSE-LABEL: test_x86_sse2_psrl_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd2,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrl_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd2,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrl_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrl_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd2,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrl_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1291,17 +1422,17 @@ define <2 x i64> @test_x86_sse2_psrl_q(<
 ; SSE-LABEL: test_x86_sse2_psrl_q:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd3,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrl_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd3,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrl_q:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrl_q:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd3,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrl_q:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1312,17 +1443,17 @@ define <8 x i16> @test_x86_sse2_psrl_w(<
 ; SSE-LABEL: test_x86_sse2_psrl_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd1,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrl_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrl_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrl_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrl_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1333,17 +1464,17 @@ define <4 x i32> @test_x86_sse2_psrli_d(
 ; SSE-LABEL: test_x86_sse2_psrli_d:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xd0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrli_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xd0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrli_d:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrli_d:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xd0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrli_d:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1354,17 +1485,17 @@ define <2 x i64> @test_x86_sse2_psrli_q(
 ; SSE-LABEL: test_x86_sse2_psrli_q:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrli_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrli_q:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrli_q:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrli_q:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1375,17 +1506,17 @@ define <8 x i16> @test_x86_sse2_psrli_w(
 ; SSE-LABEL: test_x86_sse2_psrli_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xd0,0x07]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psrli_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xd0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psrli_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psrli_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xd0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psrli_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1396,17 +1527,17 @@ define <16 x i8> @test_x86_sse2_psubs_b(
 ; SSE-LABEL: test_x86_sse2_psubs_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psubs_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psubs_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psubs_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubs_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -1417,17 +1548,17 @@ define <8 x i16> @test_x86_sse2_psubs_w(
 ; SSE-LABEL: test_x86_sse2_psubs_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psubs_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psubs_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psubs_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubs_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1438,17 +1569,17 @@ define <16 x i8> @test_x86_sse2_psubus_b
 ; SSE-LABEL: test_x86_sse2_psubus_b:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psubus_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psubus_b:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psubus_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubus_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -1459,17 +1590,17 @@ define <8 x i16> @test_x86_sse2_psubus_w
 ; SSE-LABEL: test_x86_sse2_psubus_w:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_psubus_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_psubus_w:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_psubus_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubus_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1480,17 +1611,17 @@ define <2 x double> @test_x86_sse2_sqrt_
 ; SSE-LABEL: test_x86_sse2_sqrt_pd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtpd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x51,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_sqrt_pd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_sqrt_pd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vsqrtpd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_sqrt_pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_sqrt_pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vsqrtpd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -1501,17 +1632,17 @@ define <2 x double> @test_x86_sse2_sqrt_
 ; SSE-LABEL: test_x86_sse2_sqrt_sd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_sqrt_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_sqrt_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_sqrt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_sqrt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -1519,26 +1650,44 @@ declare <2 x double> @llvm.x86.sse2.sqrt
 
 
 define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
-; SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT:    movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
-; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
-; AVX2-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
-; SKX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
+; X86-SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
+; X86-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
+; X86-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movapd (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x28,0x07]
+; X64-SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovapd (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x07]
+; X64-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse2_sqrt_sd_vec_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovapd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x07]
+; X64-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <2 x double>, <2 x double>* %a0, align 16
   %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -1553,25 +1702,25 @@ define i32 @test_x86_sse2_ucomieq_sd(<2
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
 ; SSE-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
 ; SSE-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomieq_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX2-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX2-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomieq_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
-; SKX-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
-; SKX-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomieq_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX1-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
+; AVX1-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
+; AVX1-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
+; AVX1-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomieq_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX512-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
+; AVX512-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
+; AVX512-NEXT:    andb %al, %cl ## encoding: [0x20,0xc1]
+; AVX512-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1584,21 +1733,21 @@ define i32 @test_x86_sse2_ucomige_sd(<2
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomige_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomige_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomige_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX1-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomige_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX512-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1611,21 +1760,21 @@ define i32 @test_x86_sse2_ucomigt_sd(<2
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomigt_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomigt_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomigt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX1-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomigt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX512-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1638,21 +1787,21 @@ define i32 @test_x86_sse2_ucomile_sd(<2
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomile_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomile_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
-; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomile_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
+; AVX1-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomile_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
+; AVX512-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1665,21 +1814,21 @@ define i32 @test_x86_sse2_ucomilt_sd(<2
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomilt_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX2-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomilt_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SKX-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
-; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomilt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX1-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
+; AVX1-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomilt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
+; AVX512-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1694,25 +1843,25 @@ define i32 @test_x86_sse2_ucomineq_sd(<2
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
 ; SSE-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
 ; SSE-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SSE-NEXT:    retl ## encoding: [0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_ucomineq_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX2-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX2-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse2_ucomineq_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
-; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
-; SKX-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
-; SKX-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_ucomineq_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX1-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
+; AVX1-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
+; AVX1-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
+; AVX1-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_ucomineq_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
+; AVX512-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
+; AVX512-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
+; AVX512-NEXT:    orb %al, %cl ## encoding: [0x08,0xc1]
+; AVX512-NEXT:    movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -1722,7 +1871,7 @@ define void @test_x86_sse2_pause() {
 ; CHECK-LABEL: test_x86_sse2_pause:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pause ## encoding: [0xf3,0x90]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.pause()
   ret void
 }
@@ -1732,7 +1881,7 @@ define void @lfence() nounwind {
 ; CHECK-LABEL: lfence:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    lfence ## encoding: [0x0f,0xae,0xe8]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.lfence()
   ret void
 }
@@ -1742,18 +1891,23 @@ define void @mfence() nounwind {
 ; CHECK-LABEL: mfence:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    mfence ## encoding: [0x0f,0xae,0xf0]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.mfence()
   ret void
 }
 declare void @llvm.x86.sse2.mfence() nounwind
 
 define void @clflush(i8* %p) nounwind {
-; CHECK-LABEL: clflush:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT:    clflush (%eax) ## encoding: [0x0f,0xae,0x38]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-LABEL: clflush:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    clflush (%eax) ## encoding: [0x0f,0xae,0x38]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: clflush:
+; X64:       ## %bb.0:
+; X64-NEXT:    clflush (%rdi) ## encoding: [0x0f,0xae,0x3f]
+; X64-NEXT:    retq ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.clflush(i8* %p)
   ret void
 }
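
A note on the FileCheck mechanics in the hunks above: each RUN line
passes its own --check-prefixes list, so one test body can carry shared
and per-target assertions side by side, and the {{[l|q]}} regex lets an
assertion shared by the 32-bit and 64-bit runs match either retl or
retq. A minimal sketch of the pattern -- hypothetical file, not part of
this commit:

; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
define void @clflush_demo(i8* %p) nounwind {
; CHECK-LABEL: clflush_demo:
; X86: clflush (%eax)
; X64: clflush (%rdi)
; CHECK: ret{{[l|q]}}
  tail call void @llvm.x86.sse2.clflush(i8* %p)
  ret void
}
declare void @llvm.x86.sse2.clflush(i8*) nounwind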

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll Sat Jun  2 12:43:14 2018
@@ -1,27 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvtsi642sd:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtsi642sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
-; AVX2-NEXT:    retq ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtsi642sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
-; SKX-LABEL: test_x86_sse2_cvtsi642sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
-; SKX-NEXT:    retq ## encoding: [0xc3]
+; AVX512-LABEL: test_x86_sse2_cvtsi642sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
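
The prefix scheme in the new RUN lines nests three ways: CHECK lines
are verified for all runs, AVX lines for both the +avx and +avx512
runs, and AVX1/AVX512 lines only where VEX-only and EVEX-capable output
differ. The "EVEX TO VEX Compression" comments come from the AVX512VL
run: 128-bit instructions that have a VEX form are re-encoded with the
shorter VEX prefix, which is why the AVX1 and AVX512 encodings above
are byte-identical. A sketch of the nesting -- hypothetical test, same
scheme as this commit:

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
define <4 x float> @addps_demo(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: addps_demo:
; SSE: addps %xmm1, %xmm0
; AVX: vaddps %xmm1, %xmm0, %xmm0
; CHECK: retq
  %res = fadd <4 x float> %a, %b
  ret <4 x float> %res
}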

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll Sat Jun  2 12:43:14 2018
@@ -1,27 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
-; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvtsd2si64:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2d,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvtsd2si64:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvtsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
-; AVX2-NEXT:    retq ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvtsd2si64:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvtsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
+; AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
-; SKX-LABEL: test_x86_sse2_cvtsd2si64:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvtsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
-; SKX-NEXT:    retq ## encoding: [0xc3]
+; AVX512-LABEL: test_x86_sse2_cvtsd2si64:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvtsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
+; AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
   ret i64 %res
 }
@@ -29,24 +25,20 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2
 
 
 define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvttsd2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttsd2si %xmm0, %rax
-; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvttsd2si64:
 ; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2c,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; AVX2-LABEL: test_x86_sse2_cvttsd2si64:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vcvttsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
-; AVX2-NEXT:    retq ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_cvttsd2si64:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vcvttsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
+; AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
-; SKX-LABEL: test_x86_sse2_cvttsd2si64:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vcvttsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
-; SKX-NEXT:    retq ## encoding: [0xc3]
+; AVX512-LABEL: test_x86_sse2_cvttsd2si64:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vcvttsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
+; AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
   ret i64 %res
 }
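
Since these assertions are autogenerated (per the NOTE at the top of
each file), the CHECK lines are regenerated after editing the RUN lines
rather than patched by hand. A typical invocation, as a sketch only --
run from the llvm source tree, and option spellings may vary between
revisions:

  utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      test/CodeGen/X86/sse2-intrinsics-x86_64.ll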

Modified: llvm/trunk/test/CodeGen/X86/sse2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2.ll?rev=333832&r1=333831&r2=333832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2.ll Sat Jun  2 12:43:14 2018
@@ -1,25 +1,51 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; Tests for SSE2 and below, without SSE3+.
 
 define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
-; X86-LABEL: test1:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movapd (%ecx), %xmm0
-; X86-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
-; X86-NEXT:    movapd %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test1:
-; X64:       # %bb.0:
-; X64-NEXT:    movapd (%rsi), %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X64-NEXT:    movapd %xmm1, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test1:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movapd (%ecx), %xmm0
+; X86-SSE-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X86-SSE-NEXT:    movapd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test1:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovapd (%ecx), %xmm0
+; X86-AVX-NEXT:    vmovlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test1:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movapd (%rsi), %xmm1
+; X64-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; X64-SSE-NEXT:    movapd %xmm1, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test1:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test1:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vmovapd (%rsi), %xmm1
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; X64-AVX512-NEXT:    vmovapd %xmm0, (%rdi)
+; X64-AVX512-NEXT:    retq
 	%tmp3 = load <2 x double>, <2 x double>* %A, align 16
 	%tmp7 = insertelement <2 x double> undef, double %B, i32 0
 	%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >
@@ -28,21 +54,37 @@ define void @test1(<2 x double>* %r, <2
 }
 
 define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
-; X86-LABEL: test2:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movapd (%ecx), %xmm0
-; X86-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X86-NEXT:    movapd %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test2:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rsi), %xmm1
-; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; X64-NEXT:    movaps %xmm1, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test2:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movapd (%ecx), %xmm0
+; X86-SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X86-SSE-NEXT:    movapd %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test2:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovapd (%ecx), %xmm0
+; X86-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test2:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rsi), %xmm1
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT:    movaps %xmm1, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test2:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rsi), %xmm1
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
 	%tmp3 = load <2 x double>, <2 x double>* %A, align 16
 	%tmp7 = insertelement <2 x double> undef, double %B, i32 0
 	%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 0, i32 2 >
@@ -52,22 +94,39 @@ define void @test2(<2 x double>* %r, <2
 
 
 define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind {
-; X86-LABEL: test3:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movaps (%edx), %xmm0
-; X86-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X86-NEXT:    movaps %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test3:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rsi), %xmm0
-; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test3:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movaps (%edx), %xmm0
+; X86-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test3:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    vmovaps (%edx), %xmm0
+; X86-AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test3:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rsi), %xmm0
+; X64-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test3:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rsi), %xmm0
+; X64-AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
 	%tmp = load <4 x float>, <4 x float>* %B		; <<4 x float>> [#uses=2]
 	%tmp3 = load <4 x float>, <4 x float>* %A		; <<4 x float>> [#uses=2]
 	%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0		; <float> [#uses=1]
@@ -83,42 +142,74 @@ define void @test3(<4 x float>* %res, <4
 }
 
 define void @test4(<4 x float> %X, <4 x float>* %res) nounwind {
-; X86-LABEL: test4:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; X86-NEXT:    movaps %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test4:
-; X64:       # %bb.0:
-; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test4:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test4:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test4:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test4:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
 	%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 >		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp5, <4 x float>* %res
 	ret void
 }
 
 define <4 x i32> @test5(i8** %ptr) nounwind {
-; X86-LABEL: test5:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl (%eax), %eax
-; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-NEXT:    pxor %xmm0, %xmm0
-; X86-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X86-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test5:
-; X64:       # %bb.0:
-; X64-NEXT:    movq (%rdi), %rax
-; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X64-NEXT:    pxor %xmm0, %xmm0
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test5:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl (%eax), %eax
+; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    pxor %xmm0, %xmm0
+; X86-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test5:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl (%eax), %eax
+; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test5:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq (%rdi), %rax
+; X64-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT:    pxor %xmm0, %xmm0
+; X64-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test5:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movq (%rdi), %rax
+; X64-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX-NEXT:    retq
 	%tmp = load i8*, i8** %ptr		; <i8*> [#uses=1]
 	%tmp.upgrd.1 = bitcast i8* %tmp to float*		; <float*> [#uses=1]
 	%tmp.upgrd.2 = load float, float* %tmp.upgrd.1		; <float> [#uses=1]
@@ -135,19 +226,33 @@ define <4 x i32> @test5(i8** %ptr) nounw
 }
 
 define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
-; X86-LABEL: test6:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movaps (%ecx), %xmm0
-; X86-NEXT:    movaps %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test6:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rsi), %xmm0
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test6:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movaps (%ecx), %xmm0
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test6:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovaps (%ecx), %xmm0
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test6:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rsi), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test6:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rsi), %xmm0
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %tmp1 = load <4 x float>, <4 x float>* %A            ; <<4 x float>> [#uses=1]
   %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 >          ; <<4 x float>> [#uses=1]
   store <4 x float> %tmp2, <4 x float>* %res
@@ -155,17 +260,17 @@ define void @test6(<4 x float>* %res, <4
 }
 
 define void @test7() nounwind {
-; X86-LABEL: test7:
-; X86:       # %bb.0:
-; X86-NEXT:    xorps %xmm0, %xmm0
-; X86-NEXT:    movaps %xmm0, 0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test7:
-; X64:       # %bb.0:
-; X64-NEXT:    xorps %xmm0, %xmm0
-; X64-NEXT:    movaps %xmm0, 0
-; X64-NEXT:    retq
+; SSE-LABEL: test7:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    movaps %xmm0, 0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test7:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, 0
+; AVX-NEXT:    ret{{[l|q]}}
   bitcast <4 x i32> zeroinitializer to <4 x float>                ; <<4 x float>>:1 [#uses=1]
   shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer         ; <<4 x float>>:2 [#uses=1]
   store <4 x float> %2, <4 x float>* null
@@ -175,15 +280,25 @@ define void @test7() nounwind {
 @x = external global [4 x i32]
 
 define <2 x i64> @test8() nounwind {
-; X86-LABEL: test8:
-; X86:       # %bb.0:
-; X86-NEXT:    movups x, %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test8:
-; X64:       # %bb.0:
-; X64-NEXT:    movups {{.*}}(%rip), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test8:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movups x, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovups x, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test8:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movups {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test8:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups {{.*}}(%rip), %xmm0
+; X64-AVX-NEXT:    retq
 	%tmp = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @x, i32 0, i32 0)		; <i32> [#uses=1]
 	%tmp3 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @x, i32 0, i32 1)		; <i32> [#uses=1]
 	%tmp5 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @x, i32 0, i32 2)		; <i32> [#uses=1]
@@ -197,17 +312,29 @@ define <2 x i64> @test8() nounwind {
 }
 
 define <4 x float> @test9(i32 %dummy, float %a, float %b, float %c, float %d) nounwind {
-; X86-LABEL: test9:
-; X86:       # %bb.0:
-; X86-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test9:
-; X64:       # %bb.0:
-; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test9:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test9:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test9:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test9:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; X64-AVX-NEXT:    retq
 	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
 	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
 	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
@@ -216,17 +343,29 @@ define <4 x float> @test9(i32 %dummy, fl
 }
 
 define <4 x float> @test10(float %a, float %b, float %c, float %d) nounwind {
-; X86-LABEL: test10:
-; X86:       # %bb.0:
-; X86-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test10:
-; X64:       # %bb.0:
-; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test10:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test10:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test10:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test10:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; X64-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; X64-AVX-NEXT:    retq
 	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
 	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
 	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
@@ -235,42 +374,62 @@ define <4 x float> @test10(float %a, flo
 }
 
 define <2 x double> @test11(double %a, double %b) nounwind {
-; X86-LABEL: test11:
-; X86:       # %bb.0:
-; X86-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test11:
-; X64:       # %bb.0:
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test11:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test11:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test11:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test11:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    retq
 	%tmp = insertelement <2 x double> undef, double %a, i32 0		; <<2 x double>> [#uses=1]
 	%tmp7 = insertelement <2 x double> %tmp, double %b, i32 1		; <<2 x double>> [#uses=1]
 	ret <2 x double> %tmp7
 }
 
 define void @test12() nounwind {
-; X86-LABEL: test12:
-; X86:       # %bb.0:
-; X86-NEXT:    movapd 0, %xmm0
-; X86-NEXT:    movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; X86-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X86-NEXT:    xorps %xmm2, %xmm2
-; X86-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; X86-NEXT:    addps %xmm1, %xmm2
-; X86-NEXT:    movaps %xmm2, 0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test12:
-; X64:       # %bb.0:
-; X64-NEXT:    movapd 0, %xmm0
-; X64-NEXT:    movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; X64-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X64-NEXT:    xorps %xmm2, %xmm2
-; X64-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; X64-NEXT:    addps %xmm1, %xmm2
-; X64-NEXT:    movaps %xmm2, 0
-; X64-NEXT:    retq
+; SSE-LABEL: test12:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd 0, %xmm0
+; SSE-NEXT:    movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT:    xorps %xmm2, %xmm2
+; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE-NEXT:    addps %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, 0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test12:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps 0, %xmm0
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0,1],mem[2,3]
+; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, 0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test12:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovapd 0, %xmm0
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX512-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovaps %xmm0, 0
+; AVX512-NEXT:    ret{{[l|q]}}
   %tmp1 = load <4 x float>, <4 x float>* null          ; <<4 x float>> [#uses=2]
   %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >             ; <<4 x float>> [#uses=1]
   %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >                ; <<4 x float>> [#uses=1]
@@ -280,24 +439,43 @@ define void @test12() nounwind {
 }
 
 define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
-; X86-LABEL: test13:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movaps (%edx), %xmm0
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; X86-NEXT:    movaps %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test13:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rdx), %xmm0
-; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
-; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; X64-NEXT:    movaps %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test13:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movaps (%edx), %xmm0
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test13:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    vmovaps (%edx), %xmm0
+; X86-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
+; X86-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test13:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rdx), %xmm0
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test13:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rdx), %xmm0
+; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
+; X64-AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
   %tmp3 = load <4 x float>, <4 x float>* %B            ; <<4 x float>> [#uses=1]
   %tmp5 = load <4 x float>, <4 x float>* %C            ; <<4 x float>> [#uses=1]
   %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 >         ; <<4 x float>> [#uses=1]
@@ -306,27 +484,47 @@ define void @test13(<4 x float>* %res, <
 }
 
 define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
-; X86-LABEL: test14:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movaps (%ecx), %xmm1
-; X86-NEXT:    movaps (%eax), %xmm2
-; X86-NEXT:    movaps %xmm2, %xmm0
-; X86-NEXT:    addps %xmm1, %xmm0
-; X86-NEXT:    subps %xmm1, %xmm2
-; X86-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test14:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps (%rsi), %xmm1
-; X64-NEXT:    movaps (%rdi), %xmm2
-; X64-NEXT:    movaps %xmm2, %xmm0
-; X64-NEXT:    addps %xmm1, %xmm0
-; X64-NEXT:    subps %xmm1, %xmm2
-; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test14:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movaps (%ecx), %xmm1
+; X86-SSE-NEXT:    movaps (%eax), %xmm2
+; X86-SSE-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE-NEXT:    addps %xmm1, %xmm0
+; X86-SSE-NEXT:    subps %xmm1, %xmm2
+; X86-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test14:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovaps (%ecx), %xmm0
+; X86-AVX-NEXT:    vmovaps (%eax), %xmm1
+; X86-AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm2
+; X86-AVX-NEXT:    vsubps %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test14:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rsi), %xmm1
+; X64-SSE-NEXT:    movaps (%rdi), %xmm2
+; X64-SSE-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE-NEXT:    addps %xmm1, %xmm0
+; X64-SSE-NEXT:    subps %xmm1, %xmm2
+; X64-SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test14:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps (%rsi), %xmm0
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm1
+; X64-AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm2
+; X64-AVX-NEXT:    vsubps %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; X64-AVX-NEXT:    retq
   %tmp = load <4 x float>, <4 x float>* %y             ; <<4 x float>> [#uses=2]
   %tmp5 = load <4 x float>, <4 x float>* %x            ; <<4 x float>> [#uses=2]
   %tmp9 = fadd <4 x float> %tmp5, %tmp             ; <<4 x float>> [#uses=1]
@@ -336,19 +534,33 @@ define <4 x float> @test14(<4 x float>*
 }
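
The test14 checks also show why the AVX forms drop the movaps register
copy: addps/subps are destructive two-operand instructions, while the
VEX-encoded vaddps/vsubps take a separate destination. A minimal C
sketch of the same add/sub-then-merge shape, assuming clang vector
extensions (the function name is illustrative, not from the patch):

    typedef float v4sf __attribute__((vector_size(16)));

    v4sf addsub_merge(const v4sf *x, const v4sf *y) {
      v4sf a = *x + *y;                                  /* addps / vaddps */
      v4sf s = *x - *y;                                  /* subps / vsubps */
      return __builtin_shufflevector(a, s, 0, 1, 4, 5);  /* the movlhps   */
    }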
 
 define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
-; X86-LABEL: test15:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movaps (%ecx), %xmm0
-; X86-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test15:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    movaps (%rdi), %xmm0
-; X64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test15:
+; X86-SSE:       # %bb.0: # %entry
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movaps (%ecx), %xmm0
+; X86-SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test15:
+; X86-AVX:       # %bb.0: # %entry
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovaps (%ecx), %xmm0
+; X86-AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test15:
+; X64-SSE:       # %bb.0: # %entry
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
+; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test15:
+; X64-AVX:       # %bb.0: # %entry
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
+; X64-AVX-NEXT:    retq
 entry:
   %tmp = load <4 x float>, <4 x float>* %y             ; <<4 x float>> [#uses=1]
   %tmp3 = load <4 x float>, <4 x float>* %x            ; <<4 x float>> [#uses=1]
@@ -359,18 +571,35 @@ entry:
 ; PR8900
 
 define  <2 x double> @test16(<4 x double> * nocapture %srcA, <2 x double>* nocapture %dst) {
-; X86-LABEL: test16:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movaps 96(%eax), %xmm0
-; X86-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test16:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps 96(%rdi), %xmm0
-; X64-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps 96(%eax), %xmm0
+; X86-SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test16:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps 96(%eax), %ymm0
+; X86-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX-NEXT:    vzeroupper
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps 96(%rdi), %xmm0
+; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test16:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps 96(%rdi), %ymm0
+; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    vzeroupper
+; X64-AVX-NEXT:    retq
   %i5 = getelementptr inbounds <4 x double>, <4 x double>* %srcA, i32 3
   %i6 = load <4 x double>, <4 x double>* %i5, align 32
   %i7 = shufflevector <4 x double> %i6, <4 x double> undef, <2 x i32> <i32 0, i32 2>
@@ -379,17 +608,41 @@ define  <2 x double> @test16(<4 x double
 
 ; PR9009
 define fastcc void @test17() nounwind {
-; X86-LABEL: test17:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    movaps {{.*#+}} xmm0 = <u,u,32768,32768>
-; X86-NEXT:    movaps %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test17:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,u,32768,32768>
-; X64-NEXT:    movaps %xmm0, (%rax)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test17:
+; X86-SSE:       # %bb.0: # %entry
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = <u,u,32768,32768>
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: test17:
+; X86-AVX1:       # %bb.0: # %entry
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = <u,u,32768,32768>
+; X86-AVX1-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: test17:
+; X86-AVX512:       # %bb.0: # %entry
+; X86-AVX512-NEXT:    vbroadcastss {{.*#+}} xmm0 = [32768,32768,32768,32768]
+; X86-AVX512-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX512-NEXT:    retl
+;
+; X64-SSE-LABEL: test17:
+; X64-SSE:       # %bb.0: # %entry
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = <u,u,32768,32768>
+; X64-SSE-NEXT:    movaps %xmm0, (%rax)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: test17:
+; X64-AVX1:       # %bb.0: # %entry
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = <u,u,32768,32768>
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rax)
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: test17:
+; X64-AVX512:       # %bb.0: # %entry
+; X64-AVX512-NEXT:    vbroadcastss {{.*#+}} xmm0 = [32768,32768,32768,32768]
+; X64-AVX512-NEXT:    vmovaps %xmm0, (%rax)
+; X64-AVX512-NEXT:    retq
 entry:
   %0 = insertelement <4 x i32> undef, i32 undef, i32 1
   %1 = shufflevector <4 x i32> <i32 undef, i32 undef, i32 32768, i32 32768>, <4 x i32> %0, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
@@ -400,52 +653,76 @@ entry:
 
 ; PR9210
 define <4 x float> @f(<4 x double>) nounwind {
-; X86-LABEL: f:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    cvtpd2ps %xmm1, %xmm1
-; X86-NEXT:    cvtpd2ps %xmm0, %xmm0
-; X86-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-NEXT:    retl
-;
-; X64-LABEL: f:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    cvtpd2ps %xmm1, %xmm1
-; X64-NEXT:    cvtpd2ps %xmm0, %xmm0
-; X64-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT:    retq
+; SSE-LABEL: f:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    cvtpd2ps %xmm1, %xmm1
+; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: f:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vcvtpd2ps %ymm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    ret{{[l|q]}}
 entry:
  %double2float.i = fptrunc <4 x double> %0 to <4 x float>
  ret <4 x float> %double2float.i
 }
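
For the PR9210 test above, a single vcvtpd2ps on the full ymm register
replaces the SSE sequence of two cvtpd2ps plus an unpcklpd merge. A
minimal intrinsics analogue, assuming AVX is available (the function
name is illustrative, not from the patch):

    #include <immintrin.h>

    /* <4 x double> -> <4 x float>, as in @f; requires -mavx. */
    __m128 trunc4(__m256d v) {
      return _mm256_cvtpd_ps(v);   /* vcvtpd2ps %ymm0, %xmm0 */
    }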
 
 define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
-; X86-LABEL: test_insert_64_zext:
-; X86:       # %bb.0:
-; X86-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_insert_64_zext:
-; X64:       # %bb.0:
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_insert_64_zext:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_insert_64_zext:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT:    ret{{[l|q]}}
   %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
   ret <2 x i64> %1
 }
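
The shuffle-with-zero in test_insert_64_zext is the canonical way to
ask for "keep the low 64-bit lane, zero the high one", which is exactly
what a register-to-register (v)movq does. A minimal SSE2 intrinsics
analogue (the function name is illustrative, not from the patch):

    #include <emmintrin.h>

    __m128i zext_low64(__m128i v) {
      return _mm_move_epi64(v);   /* movq: low qword kept, high qword zeroed */
    }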
 
 define <4 x i32> @PR19721(<4 x i32> %i) {
-; X86-LABEL: PR19721:
-; X86:       # %bb.0:
-; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: PR19721:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %xmm0, %rax
-; X64-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
-; X64-NEXT:    andq %rax, %rcx
-; X64-NEXT:    movq %rcx, %xmm1
-; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: PR19721:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: PR19721:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: PR19721:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %xmm0, %rax
+; X64-SSE-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
+; X64-SSE-NEXT:    andq %rax, %rcx
+; X64-SSE-NEXT:    movq %rcx, %xmm1
+; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: PR19721:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vmovq %xmm0, %rax
+; X64-AVX1-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
+; X64-AVX1-NEXT:    andq %rax, %rcx
+; X64-AVX1-NEXT:    vmovq %rcx, %xmm1
+; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: PR19721:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vmovq %xmm0, %rax
+; X64-AVX512-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
+; X64-AVX512-NEXT:    andq %rax, %rcx
+; X64-AVX512-NEXT:    vmovq %rcx, %xmm1
+; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX512-NEXT:    retq
   %bc = bitcast <4 x i32> %i to i128
   %insert = and i128 %bc, -4294967296
   %bc2 = bitcast i128 %insert to <4 x i32>
@@ -453,27 +730,21 @@ define <4 x i32> @PR19721(<4 x i32> %i)
 }
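
In PR19721 the and with -4294967296 (an i128 with every bit set except
the low 32) just clears the low 32-bit lane; the 32-bit AVX path
recognizes that as a zero blend, while the 64-bit paths still
round-trip the low qword through a GPR. A minimal sketch of the same
operation, assuming clang vector extensions (the function name is
illustrative, not from the patch):

    typedef int v4si __attribute__((vector_size(16)));

    v4si clear_lane0(v4si v) {
      v[0] = 0;      /* lane 0 zeroed, lanes 1-3 unchanged */
      return v;
    }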
 
 define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
-; X86-LABEL: test_mul:
-; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq %xmm1, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-NEXT:    pmuludq %xmm2, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_mul:
-; X64:       # %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X64-NEXT:    pmuludq %xmm1, %xmm0
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X64-NEXT:    pmuludq %xmm2, %xmm1
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mul:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mul:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %m = mul <4 x i32> %x, %y
   ret <4 x i32> %m
 }
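
test_mul is the clearest payoff of the new AVX coverage: SSE2 has no
32-bit packed multiply, so the legalizer emulates the <4 x i32> mul
with two pmuludq plus shuffles, while SSE4.1's pmulld (vpmulld under
VEX) handles it in one instruction. A minimal sketch, assuming clang
vector extensions (the function name is illustrative, not from the
patch):

    typedef int v4si __attribute__((vector_size(16)));

    v4si mul4(v4si x, v4si y) {
      return x * y;   /* pmuludq + shuffles on SSE2, one vpmulld with AVX */
    }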
