[llvm] r333828 - [X86][SSE] Cleanup SSE4A/SSE41/SSE42 intrinsics tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jun 2 10:33:26 PDT 2018


Author: rksimon
Date: Sat Jun  2 10:33:26 2018
New Revision: 333828

URL: http://llvm.org/viewvc/llvm-project?rev=333828&view=rev
Log:
[X86][SSE] Cleanup SSE4A/SSE41/SSE42 intrinsics tests

Ensure we cover 32/64-bit targets for the SSE/AVX/AVX512 cases as necessary.

Add some missing encoding checks to the SSE4A tests.
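
For reference, the RUN-line pattern applied across these files (the two
lines below are excerpted verbatim from the sse41-intrinsics-fast-isel.ll
diff that follows) fans each test out over 32-bit and 64-bit triples for
each ISA level, with FileCheck prefixes ordered from most to least shared:

    ; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
    ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512

Output common to every run lands under CHECK, per-arch output under
X86/X64, per-ISA output under SSE/AVX/AVX1/AVX512, and anything specific
to a single combination under the compound prefixes such as X86-SSE or
X64-AVX512.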

Modified:
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/sse41.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse4a-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse4a.ll

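The assertions in these files are autogenerated, so after a RUN-line change
the checks are regenerated rather than hand-edited. A sketch of the
invocation (the --llc-binary value and the test path here are assumptions;
the script's options vary between LLVM revisions):

    utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
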
Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll Sat Jun  2 10:33:26 2018
@@ -1,19 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse41-builtins.c
 
 define <2 x i64> @test_mm_blend_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_blend_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blend_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blend_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_blend_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %shuf = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 7>
@@ -22,49 +26,51 @@ define <2 x i64> @test_mm_blend_epi16(<2
 }
 
 define <2 x double> @test_mm_blend_pd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_blend_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blend_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blend_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_blend_pd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_blend_pd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_blend_ps(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_blend_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blend_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blend_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_blend_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x float> %res
 }
 
 define <2 x i64> @test_mm_blendv_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
-; X32-LABEL: test_mm_blendv_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    movdqa %xmm0, %xmm3
-; X32-NEXT:    movaps %xmm2, %xmm0
-; X32-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
-; X32-NEXT:    movdqa %xmm3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blendv_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    movdqa %xmm0, %xmm3
-; X64-NEXT:    movaps %xmm2, %xmm0
-; X64-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
-; X64-NEXT:    movdqa %xmm3, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blendv_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_blendv_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
@@ -75,132 +81,126 @@ define <2 x i64> @test_mm_blendv_epi8(<2
 declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x double> @test_mm_blendv_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; X32-LABEL: test_mm_blendv_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movapd %xmm0, %xmm3
-; X32-NEXT:    movaps %xmm2, %xmm0
-; X32-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
-; X32-NEXT:    movapd %xmm3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blendv_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    movapd %xmm0, %xmm3
-; X64-NEXT:    movaps %xmm2, %xmm0
-; X64-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
-; X64-NEXT:    movapd %xmm3, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blendv_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm3
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE-NEXT:    movapd %xmm3, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_blendv_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 
 define <4 x float> @test_mm_blendv_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; X32-LABEL: test_mm_blendv_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    movaps %xmm0, %xmm3
-; X32-NEXT:    movaps %xmm2, %xmm0
-; X32-NEXT:    blendvps %xmm0, %xmm1, %xmm3
-; X32-NEXT:    movaps %xmm3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_blendv_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    movaps %xmm0, %xmm3
-; X64-NEXT:    movaps %xmm2, %xmm0
-; X64-NEXT:    blendvps %xmm0, %xmm1, %xmm3
-; X64-NEXT:    movaps %xmm3, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_blendv_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_blendv_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 
 define <2 x double> @test_mm_ceil_pd(<2 x double> %a0) {
-; X32-LABEL: test_mm_ceil_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundpd $2, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ceil_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundpd $2, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ceil_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundpd $2, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ceil_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundpd $2, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 2)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
 
 define <4 x float> @test_mm_ceil_ps(<4 x float> %a0) {
-; X32-LABEL: test_mm_ceil_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    roundps $2, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ceil_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    roundps $2, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ceil_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundps $2, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ceil_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundps $2, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 2)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
 
 define <2 x double> @test_mm_ceil_sd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_ceil_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundsd $2, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ceil_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundsd $2, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ceil_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundsd $2, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ceil_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundsd $2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 2)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
 
 define <4 x float> @test_mm_ceil_ss(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_ceil_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    roundss $2, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_ceil_ss:
-; X64:       # %bb.0:
-; X64-NEXT:    roundss $2, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_ceil_ss:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundss $2, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_ceil_ss:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundss $2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 2)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone
 
 define <2 x i64> @test_mm_cmpeq_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_cmpeq_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmpeq_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cmpeq_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpeq_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %cmp = icmp eq <2 x i64> %a0, %a1
   %res = sext <2 x i1> %cmp to <2 x i64>
   ret <2 x i64> %res
 }
 
 define <2 x i64> @test_mm_cvtepi8_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi8_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxbw %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi8_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxbw %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi8_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxbw %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi8_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbw %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %sext = sext <8 x i8> %ext0 to <8 x i16>
@@ -209,15 +209,15 @@ define <2 x i64> @test_mm_cvtepi8_epi16(
 }
 
 define <2 x i64> @test_mm_cvtepi8_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi8_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxbd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi8_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxbd %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi8_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxbd %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi8_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbd %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %sext = sext <4 x i8> %ext0 to <4 x i32>
@@ -226,15 +226,15 @@ define <2 x i64> @test_mm_cvtepi8_epi32(
 }
 
 define <2 x i64> @test_mm_cvtepi8_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi8_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxbq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi8_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxbq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi8_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxbq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi8_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
   %sext = sext <2 x i8> %ext0 to <2 x i64>
@@ -242,15 +242,15 @@ define <2 x i64> @test_mm_cvtepi8_epi64(
 }
 
 define <2 x i64> @test_mm_cvtepi16_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi16_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxwd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi16_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxwd %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi16_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxwd %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi16_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %ext0 = shufflevector <8 x i16> %arg0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %sext = sext <4 x i16> %ext0 to <4 x i32>
@@ -259,15 +259,15 @@ define <2 x i64> @test_mm_cvtepi16_epi32
 }
 
 define <2 x i64> @test_mm_cvtepi16_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi16_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxwq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi16_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxwq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi16_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxwq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi16_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxwq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %ext0 = shufflevector <8 x i16> %arg0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
   %sext = sext <2 x i16> %ext0 to <2 x i64>
@@ -275,15 +275,15 @@ define <2 x i64> @test_mm_cvtepi16_epi64
 }
 
 define <2 x i64> @test_mm_cvtepi32_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepi32_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovsxdq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepi32_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovsxdq %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepi32_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovsxdq %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepi32_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %ext0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %sext = sext <2 x i32> %ext0 to <2 x i64>
@@ -291,15 +291,15 @@ define <2 x i64> @test_mm_cvtepi32_epi64
 }
 
 define <2 x i64> @test_mm_cvtepu8_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu8_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu8_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu8_epi16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu8_epi16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %sext = zext <8 x i8> %ext0 to <8 x i16>
@@ -308,15 +308,15 @@ define <2 x i64> @test_mm_cvtepu8_epi16(
 }
 
 define <2 x i64> @test_mm_cvtepu8_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu8_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu8_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu8_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu8_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %sext = zext <4 x i8> %ext0 to <4 x i32>
@@ -325,15 +325,15 @@ define <2 x i64> @test_mm_cvtepu8_epi32(
 }
 
 define <2 x i64> @test_mm_cvtepu8_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu8_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu8_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu8_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu8_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
   %sext = zext <2 x i8> %ext0 to <2 x i64>
@@ -341,15 +341,15 @@ define <2 x i64> @test_mm_cvtepu8_epi64(
 }
 
 define <2 x i64> @test_mm_cvtepu16_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu16_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu16_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu16_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu16_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %ext0 = shufflevector <8 x i16> %arg0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %sext = zext <4 x i16> %ext0 to <4 x i32>
@@ -358,15 +358,15 @@ define <2 x i64> @test_mm_cvtepu16_epi32
 }
 
 define <2 x i64> @test_mm_cvtepu16_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu16_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu16_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu16_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu16_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %ext0 = shufflevector <8 x i16> %arg0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
   %sext = zext <2 x i16> %ext0 to <2 x i64>
@@ -374,15 +374,15 @@ define <2 x i64> @test_mm_cvtepu16_epi64
 }
 
 define <2 x i64> @test_mm_cvtepu32_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_cvtepu32_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cvtepu32_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_cvtepu32_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cvtepu32_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %ext0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %sext = zext <2 x i32> %ext0 to <2 x i64>
@@ -390,47 +390,47 @@ define <2 x i64> @test_mm_cvtepu32_epi64
 }
 
 define <2 x double> @test_mm_dp_pd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_dp_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    dppd $7, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_dp_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    dppd $7, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_dp_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    dppd $7, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_dp_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
 
 define <4 x float> @test_mm_dp_ps(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_dp_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    dpps $7, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_dp_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    dpps $7, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_dp_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    dpps $7, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_dp_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
 
 define i32 @test_mm_extract_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_extract_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pextrb $1, %xmm0, %eax
-; X32-NEXT:    movzbl %al, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pextrb $1, %xmm0, %eax
-; X64-NEXT:    movzbl %al, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_extract_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrb $1, %xmm0, %eax
+; SSE-NEXT:    movzbl %al, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_extract_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX-NEXT:    movzbl %al, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %ext = extractelement <16 x i8> %arg0, i32 1
   %res = zext i8 %ext to i32
@@ -438,121 +438,144 @@ define i32 @test_mm_extract_epi8(<2 x i6
 }
 
 define i32 @test_mm_extract_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_extract_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    extractps $1, %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    extractps $1, %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_extract_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    extractps $1, %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_extract_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $1, %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %ext = extractelement <4 x i32> %arg0, i32 1
   ret i32 %ext
 }
 
 define i64 @test_mm_extract_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_extract_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    extractps $2, %xmm0, %eax
-; X32-NEXT:    extractps $3, %xmm0, %edx
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pextrq $1, %xmm0, %rax
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_extract_epi64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    extractps $2, %xmm0, %eax
+; X86-SSE-NEXT:    extractps $3, %xmm0, %edx
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_extract_epi64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vextractps $2, %xmm0, %eax
+; X86-AVX-NEXT:    vextractps $3, %xmm0, %edx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_extract_epi64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pextrq $1, %xmm0, %rax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_extract_epi64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpextrq $1, %xmm0, %rax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %ext = extractelement <2 x i64> %a0, i32 1
   ret i64 %ext
 }
 
 define i32 @test_mm_extract_ps(<4 x float> %a0) {
-; X32-LABEL: test_mm_extract_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-NEXT:    movd %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_extract_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_extract_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %ext = extractelement <4 x float> %a0, i32 1
   %bc = bitcast float %ext to i32
   ret i32 %bc
 }
 
 define <2 x double> @test_mm_floor_pd(<2 x double> %a0) {
-; X32-LABEL: test_mm_floor_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundpd $1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_floor_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundpd $1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_floor_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundpd $1, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_floor_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundpd $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 1)
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_floor_ps(<4 x float> %a0) {
-; X32-LABEL: test_mm_floor_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    roundps $1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_floor_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    roundps $1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_floor_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundps $1, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_floor_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundps $1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 1)
   ret <4 x float> %res
 }
 
 define <2 x double> @test_mm_floor_sd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_floor_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundsd $1, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_floor_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundsd $1, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_floor_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundsd $1, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_floor_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundsd $1, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 1)
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_floor_ss(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_floor_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    roundss $1, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_floor_ss:
-; X64:       # %bb.0:
-; X64-NEXT:    roundss $1, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_floor_ss:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundss $1, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_floor_ss:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundss $1, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 1)
   ret <4 x float> %res
 }
 
 define <2 x i64> @test_mm_insert_epi8(<2 x i64> %a0, i8 %a1) {
-; X32-LABEL: test_mm_insert_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pinsrb $1, %eax, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    pinsrb $1, %eax, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_insert_epi8:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pinsrb $1, %eax, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_insert_epi8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_insert_epi8:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movzbl %dil, %eax
+; X64-SSE-NEXT:    pinsrb $1, %eax, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_insert_epi8:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movzbl %dil, %eax
+; X64-AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = insertelement <16 x i8> %arg0, i8 %a1,i32 1
   %bc = bitcast <16 x i8> %res to <2 x i64>
@@ -560,15 +583,25 @@ define <2 x i64> @test_mm_insert_epi8(<2
 }
 
 define <2 x i64> @test_mm_insert_epi32(<2 x i64> %a0, i32 %a1) {
-; X32-LABEL: test_mm_insert_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pinsrd $1, %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_insert_epi32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_insert_epi32:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_insert_epi32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pinsrd $1, %edi, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_insert_epi32:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = insertelement <4 x i32> %arg0, i32 %a1,i32 1
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -576,45 +609,56 @@ define <2 x i64> @test_mm_insert_epi32(<
 }
 
 define <2 x i64> @test_mm_insert_epi64(<2 x i64> %a0, i64 %a1) {
-; X32-LABEL: test_mm_insert_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    pinsrd $2, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    pinsrd $3, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    pinsrq $1, %rdi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_insert_epi64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pinsrd $2, {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    pinsrd $3, {{[0-9]+}}(%esp), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_insert_epi64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_insert_epi64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pinsrq $1, %rdi, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_insert_epi64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %res = insertelement <2 x i64> %a0, i64 %a1,i32 1
   ret <2 x i64> %res
 }
 
 define <4 x float> @test_mm_insert_ps(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_insert_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_insert_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_insert_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 4)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_max_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_max_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxsb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxsb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxsb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp sgt <16 x i8> %arg0, %arg1
@@ -624,15 +668,15 @@ define <2 x i64> @test_mm_max_epi8(<2 x
 }
 
 define <2 x i64> @test_mm_max_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_max_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp sgt <4 x i32> %arg0, %arg1
@@ -642,15 +686,15 @@ define <2 x i64> @test_mm_max_epi32(<2 x
 }
 
 define <2 x i64> @test_mm_max_epu16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_max_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxuw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxuw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxuw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp ugt <8 x i16> %arg0, %arg1
@@ -660,15 +704,15 @@ define <2 x i64> @test_mm_max_epu16(<2 x
 }
 
 define <2 x i64> @test_mm_max_epu32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_max_epu32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmaxud %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_max_epu32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmaxud %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_max_epu32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmaxud %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_max_epu32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp ugt <4 x i32> %arg0, %arg1
@@ -678,15 +722,15 @@ define <2 x i64> @test_mm_max_epu32(<2 x
 }
 
 define <2 x i64> @test_mm_min_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_min_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    pminsb %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    pminsb %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epi8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminsb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epi8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %cmp = icmp slt <16 x i8> %arg0, %arg1
@@ -696,15 +740,15 @@ define <2 x i64> @test_mm_min_epi8(<2 x
 }
 
 define <2 x i64> @test_mm_min_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_min_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pminsd %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pminsd %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminsd %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp slt <4 x i32> %arg0, %arg1
@@ -714,15 +758,15 @@ define <2 x i64> @test_mm_min_epi32(<2 x
 }
 
 define <2 x i64> @test_mm_min_epu16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_min_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    pminuw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    pminuw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminuw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %cmp = icmp ult <8 x i16> %arg0, %arg1
@@ -732,15 +776,15 @@ define <2 x i64> @test_mm_min_epu16(<2 x
 }
 
 define <2 x i64> @test_mm_min_epu32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_min_epu32:
-; X32:       # %bb.0:
-; X32-NEXT:    pminud %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_min_epu32:
-; X64:       # %bb.0:
-; X64-NEXT:    pminud %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_min_epu32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminud %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_min_epu32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %cmp = icmp ult <4 x i32> %arg0, %arg1
@@ -750,15 +794,15 @@ define <2 x i64> @test_mm_min_epu32(<2 x
 }
 
 define <2 x i64> @test_mm_minpos_epu16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_minpos_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    phminposuw %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_minpos_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    phminposuw %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_minpos_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phminposuw %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_minpos_epu16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vphminposuw %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %arg0)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -767,15 +811,15 @@ define <2 x i64> @test_mm_minpos_epu16(<
 declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_mpsadbw_epu8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mpsadbw_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    mpsadbw $1, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mpsadbw_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    mpsadbw $1, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mpsadbw_epu8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mpsadbw $1, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mpsadbw_epu8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmpsadbw $1, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %arg0, <16 x i8> %arg1, i8 1)
@@ -785,31 +829,41 @@ define <2 x i64> @test_mm_mpsadbw_epu8(<
 declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_mul_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mul_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    psllq $32, %xmm0
-; X32-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X32-NEXT:    psrad $31, %xmm0
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; X32-NEXT:    psllq $32, %xmm1
-; X32-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X32-NEXT:    psrad $31, %xmm1
-; X32-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; X32-NEXT:    pmuldq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mul_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    psllq $32, %xmm0
-; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X64-NEXT:    psrad $31, %xmm0
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; X64-NEXT:    psllq $32, %xmm1
-; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X64-NEXT:    psrad $31, %xmm1
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; X64-NEXT:    pmuldq %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mul_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllq $32, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT:    psllq $32, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE-NEXT:    pmuldq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: test_mm_mul_epi32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_mul_epi32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraq $32, %zmm0, %zmm0
+; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vpsraq $32, %zmm1, %zmm1
+; AVX512-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    ret{{[l|q]}}
   %A = shl <2 x i64> %a0, <i64 32, i64 32>
   %A1 = ashr exact <2 x i64> %A, <i64 32, i64 32>
   %B = shl <2 x i64> %a1, <i64 32, i64 32>
@@ -819,15 +873,15 @@ define <2 x i64> @test_mm_mul_epi32(<2 x
 }
 
 define <2 x i64> @test_mm_mullo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_mullo_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    pmulld %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_mullo_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    pmulld %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_mullo_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmulld %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_mullo_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = mul <4 x i32> %arg0, %arg1
@@ -836,15 +890,15 @@ define <2 x i64> @test_mm_mullo_epi32(<2
 }
 
 define <2 x i64> @test_mm_packus_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_packus_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    packusdw %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_packus_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    packusdw %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_packus_epi32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    packusdw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_packus_epi32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -854,72 +908,83 @@ define <2 x i64> @test_mm_packus_epi32(<
 declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x double> @test_mm_round_pd(<2 x double> %a0) {
-; X32-LABEL: test_mm_round_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundpd $4, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_round_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundpd $4, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_round_pd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundpd $4, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_round_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundpd $4, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4)
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_round_ps(<4 x float> %a0) {
-; X32-LABEL: test_mm_round_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    roundps $4, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_round_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    roundps $4, %xmm0, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_round_ps:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundps $4, %xmm0, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_round_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundps $4, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4)
   ret <4 x float> %res
 }
 
 define <2 x double> @test_mm_round_sd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: test_mm_round_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    roundsd $4, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_round_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    roundsd $4, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_round_sd:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundsd $4, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_round_sd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundsd $4, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 4)
   ret <2 x double> %res
 }
 
 define <4 x float> @test_mm_round_ss(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: test_mm_round_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    roundss $4, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_round_ss:
-; X64:       # %bb.0:
-; X64-NEXT:    roundss $4, %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_round_ss:
+; SSE:       # %bb.0:
+; SSE-NEXT:    roundss $4, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_round_ss:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 4)
   ret <4 x float> %res
 }
 
 define <2 x i64> @test_mm_stream_load_si128(<2 x i64>* %a0) {
-; X32-LABEL: test_mm_stream_load_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntdqa (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_stream_load_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    movntdqa (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: test_mm_stream_load_si128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movntdqa (%eax), %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: test_mm_stream_load_si128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovntdqa (%eax), %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_stream_load_si128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movntdqa (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_stream_load_si128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovntdqa (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64>* %a0 to i8*
   %res = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %arg0)
   ret <2 x i64> %res
@@ -927,114 +992,114 @@ define <2 x i64> @test_mm_stream_load_si
 declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
 
 define i32 @test_mm_test_all_ones(<2 x i64> %a0) {
-; X32-LABEL: test_mm_test_all_ones:
-; X32:       # %bb.0:
-; X32-NEXT:    pcmpeqd %xmm1, %xmm1
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    setb %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_test_all_ones:
-; X64:       # %bb.0:
-; X64-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    setb %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_test_all_ones:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    setb %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_test_all_ones:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> <i64 -1, i64 -1>)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
 
 define i32 @test_mm_test_all_zeros(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_test_all_zeros:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    sete %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_test_all_zeros:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    sete %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_test_all_zeros:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    sete %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_test_all_zeros:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
 
 define i32 @test_mm_test_mix_ones_zeros(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_test_mix_ones_zeros:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_test_mix_ones_zeros:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_test_mix_ones_zeros:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_test_mix_ones_zeros:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1)
   ret i32 %res
 }
 declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
 
 define i32 @test_mm_testc_si128(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_testc_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    setb %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_testc_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    setb %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_testc_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    setb %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_testc_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
   ret i32 %res
 }
 
 define i32 @test_mm_testnzc_si128(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_testnzc_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_testnzc_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_testnzc_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_testnzc_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1)
   ret i32 %res
 }
 
 define i32 @test_mm_testz_si128(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_testz_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    sete %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_testz_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    sete %al
-; X64-NEXT:    retq
+; SSE-LABEL: test_mm_testz_si128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    ptest %xmm1, %xmm0
+; SSE-NEXT:    sete %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_testz_si128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vptest %xmm1, %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1)
   ret i32 %res
 }

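An aside on the check-prefix scheme used throughout the updated tests: FileCheck's
--check-prefixes flag activates every prefix in its list for that RUN line, so a
single SSE block of assertions can serve both the i386 and x86_64 SSE runs, with
the regex ret{{[l|q]}} absorbing the retl/retq difference; the narrower prefixes
(X86-SSE, X64-AVX, AVX1, AVX512, ...) remain available wherever codegen genuinely
diverges between runs. A minimal sketch of the pattern, using a hypothetical
smax_example function rather than anything from this patch:

; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE

; Signed i32 max lowers to pmaxsd on SSE4.1 targets; one block covers both runs.
define <4 x i32> @smax_example(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: smax_example:
; SSE:         pmaxsd %xmm1, %xmm0
; SSE:         ret{{[l|q]}}
  %cmp = icmp sgt <4 x i32> %a, %b
  %res = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  ret <4 x i32> %res
}

Both RUN lines are then satisfied by one set of assertions instead of duplicated
X32/X64 blocks.
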
Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll Sat Jun  2 10:33:26 2018
@@ -1,14 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse4.1 | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; This test works just like the non-upgrade one except that it only checks
 ; forms which require auto-upgrading.
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_blendpd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    blendps $12, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x0c]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_blendpd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vblendps $3, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_blendpd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmovsd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf3,0x10,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 6) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -16,10 +34,17 @@ declare <2 x double> @llvm.x86.sse41.ble
 
 
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_blendps:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_blendps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    blendps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x07]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1,2],xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_blendps:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
+; AVX-NEXT:    ## xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -27,10 +52,15 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 
 define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_dppd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    dppd $7, %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_dppd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dppd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -38,10 +68,15 @@ declare <2 x double> @llvm.x86.sse41.dpp
 
 
 define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_dpps:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    dpps $7, %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_dpps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dpps:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -49,10 +84,23 @@ declare <4 x float> @llvm.x86.sse41.dpps
 
 
 define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_insertps:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_insertps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
+; SSE-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_insertps:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX1-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_insertps:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX512-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i32 17) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -60,11 +108,27 @@ declare <4 x float> @llvm.x86.sse41.inse
 
 
 define <2 x i64> @test_x86_sse41_movntdqa(<2 x i64>* %a0) {
-; CHECK-LABEL: test_x86_sse41_movntdqa:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movntdqa (%eax), %xmm0
-; CHECK-NEXT:    retl
+; X86-SSE-LABEL: test_x86_sse41_movntdqa:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movntdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x00]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse41_movntdqa:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovntdqa (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x00]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse41_movntdqa:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movntdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse41_movntdqa:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vmovntdqa (%rdi), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %arg0 = bitcast <2 x i64>* %a0 to i8*
   %res = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %arg0)
   ret <2 x i64> %res
@@ -73,10 +137,15 @@ declare <2 x i64> @llvm.x86.sse41.movntd
 
 
 define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse41_mpsadbw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    mpsadbw $7, %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_mpsadbw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_mpsadbw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i32 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -84,10 +153,17 @@ declare <8 x i16> @llvm.x86.sse41.mpsadb
 
 
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_sse41_pblendw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pblendw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pblendw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x07]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pblendw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpblendw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc1,0x07]
+; AVX-NEXT:    ## xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -95,10 +171,15 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxbd %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxbd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxbd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x21,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -106,10 +187,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxbq %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxbq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxbq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x22,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -117,10 +203,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxbw %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxbw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x20,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -128,10 +219,15 @@ declare <8 x i16> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxdq %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxdq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxdq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x25,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxdq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -139,10 +235,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxwd %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxwd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x23,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxwd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -150,10 +251,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovsxwq %xmm0, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovsxwq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovsxwq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x24,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxwq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovsxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -161,10 +267,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxbd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxbd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x31,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxbd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x31,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -172,10 +285,17 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxbq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxbq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxbq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -183,10 +303,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 
 define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxbw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x30,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxbw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x30,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -194,10 +321,17 @@ declare <8 x i16> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxdq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxdq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxdq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x35,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxdq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -205,10 +339,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 
 define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxwd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxwd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x33,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxwd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x33,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -216,90 +357,137 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxwq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmovzxwq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxwq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x34,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovzxwq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmovzxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x34,0xc0]
+; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
 
 define <16 x i8> @max_epi8(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: max_epi8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxsb %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epi8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: max_epi8:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
 declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <16 x i8> @min_epi8(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: min_epi8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminsb %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epi8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: min_epi8:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
 declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @max_epu16(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: max_epu16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxuw %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epu16:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: max_epu16:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @min_epu16(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: min_epu16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminuw %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epu16:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: min_epu16:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <4 x i32> @max_epi32(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: max_epi32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epi32:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: max_epi32:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
 declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <4 x i32> @min_epi32(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: min_epi32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminsd %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epi32:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: min_epi32:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
 declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <4 x i32> @max_epu32(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: max_epu32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmaxud %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: max_epu32:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: max_epu32:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
 declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <4 x i32> @min_epu32(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: min_epu32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pminud %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: min_epu32:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: min_epu32:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
@@ -307,10 +495,15 @@ declare <4 x i32> @llvm.x86.sse41.pminud
 
 
 define <2 x i64> @test_x86_sse41_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_sse41_pmuldq:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pmuldq %xmm1, %xmm0
-; CHECK-NEXT:    retl
+; SSE-LABEL: test_x86_sse41_pmuldq:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmuldq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x28,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmuldq:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }

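On the two other flags added to the RUN lines above: -show-mc-encoding makes llc
print each instruction's MC byte encoding as a trailing assembly comment, so the
assertions pin down the exact opcode form selected (plain SSE vs. VEX vs. the
"EVEX TO VEX Compression" cases above), not just the mnemonic; -disable-peephole
skips the peephole optimizer pass, whose load folding and copy rewrites could
otherwise perturb the exact instruction sequences being checked. A sketch of an
encoding check, reusing the dppd bytes from the hunk above but with a
hypothetical function name:

; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s

define <2 x double> @dppd_example(<2 x double> %a0, <2 x double> %a1) {
; The encoding comment must match byte-for-byte, so a change of opcode form fails the test.
; CHECK: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
  %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
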
Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll Sat Jun  2 10:33:26 2018
@@ -1,21 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; SSE41-LABEL: test_x86_sse41_blendvpd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
-; SSE41-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
-; SSE41-NEXT:    movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_blendvpd:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_blendvpd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
+; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
+; SSE-NEXT:    blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
+; SSE-NEXT:    movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_blendvpd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -23,18 +26,18 @@ declare <2 x double> @llvm.x86.sse41.ble
 
 
 define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; SSE41-LABEL: test_x86_sse41_blendvps:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
-; SSE41-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
-; SSE41-NEXT:    movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_blendvps:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_blendvps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
+; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
+; SSE-NEXT:    movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_blendvps:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -42,15 +45,15 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 
 define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
-; SSE41-LABEL: test_x86_sse41_dppd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_dppd:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_dppd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dppd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -58,15 +61,15 @@ declare <2 x double> @llvm.x86.sse41.dpp
 
 
 define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
-; SSE41-LABEL: test_x86_sse41_dpps:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_dpps:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_dpps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dpps:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -74,23 +77,23 @@ declare <4 x float> @llvm.x86.sse41.dpps
 
 
 define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
-; SSE41-LABEL: test_x86_sse41_insertps:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
-; SSE41-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_insertps:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
-; AVX2-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_insertps:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
-; SKX-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_insertps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
+; SSE-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_insertps:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX1-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_insertps:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX512-NEXT:    ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -99,15 +102,15 @@ declare <4 x float> @llvm.x86.sse41.inse
 
 
 define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE41-LABEL: test_x86_sse41_mpsadbw:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_mpsadbw:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_mpsadbw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_mpsadbw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -115,20 +118,15 @@ declare <8 x i16> @llvm.x86.sse41.mpsadb
 
 
 define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE41-LABEL: test_x86_sse41_packusdw:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_packusdw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_packusdw:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_packusdw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_packusdw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -136,44 +134,51 @@ declare <8 x i16> @llvm.x86.sse41.packus
 
 
 define <8 x i16> @test_x86_sse41_packusdw_fold() {
-; SSE41-LABEL: test_x86_sse41_packusdw_fold:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
-; SSE41-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
-; SSE41-NEXT:    ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_packusdw_fold:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
-; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_packusdw_fold:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vmovaps LCPI7_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
-; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse41_packusdw_fold:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X86-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse41_packusdw_fold:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X86-AVX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse41_packusdw_fold:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X64-SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI7_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse41_packusdw_fold:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X64-AVX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI7_0-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
   ret <8 x i16> %res
 }
 
 
 define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
-; SSE41-LABEL: test_x86_sse41_pblendvb:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
-; SSE41-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
-; SSE41-NEXT:    movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_pblendvb:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pblendvb:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
+; SSE-NEXT:    movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
+; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
+; SSE-NEXT:    movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pblendvb:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -181,15 +186,15 @@ declare <16 x i8> @llvm.x86.sse41.pblend
 
 
 define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) {
-; SSE41-LABEL: test_x86_sse41_phminposuw:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_phminposuw:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_phminposuw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_phminposuw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -197,20 +202,15 @@ declare <8 x i16> @llvm.x86.sse41.phminp
 
 
 define <16 x i8> @test_x86_sse41_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE41-LABEL: test_x86_sse41_pmaxsb:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pmaxsb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pmaxsb:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pmaxsb:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmaxsb:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -218,20 +218,15 @@ declare <16 x i8> @llvm.x86.sse41.pmaxsb
 
 
 define <4 x i32> @test_x86_sse41_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE41-LABEL: test_x86_sse41_pmaxsd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pmaxsd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pmaxsd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pmaxsd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmaxsd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -239,20 +234,15 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd
 
 
 define <4 x i32> @test_x86_sse41_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE41-LABEL: test_x86_sse41_pmaxud:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pmaxud:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pmaxud:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pmaxud:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmaxud:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -260,20 +250,15 @@ declare <4 x i32> @llvm.x86.sse41.pmaxud
 
 
 define <8 x i16> @test_x86_sse41_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
-; SSE41-LABEL: test_x86_sse41_pmaxuw:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pmaxuw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pmaxuw:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pmaxuw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmaxuw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -281,20 +266,15 @@ declare <8 x i16> @llvm.x86.sse41.pmaxuw
 
 
 define <16 x i8> @test_x86_sse41_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE41-LABEL: test_x86_sse41_pminsb:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pminsb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pminsb:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pminsb:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pminsb:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -302,20 +282,15 @@ declare <16 x i8> @llvm.x86.sse41.pminsb
 
 
 define <4 x i32> @test_x86_sse41_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE41-LABEL: test_x86_sse41_pminsd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pminsd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pminsd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pminsd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pminsd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -323,20 +298,15 @@ declare <4 x i32> @llvm.x86.sse41.pminsd
 
 
 define <4 x i32> @test_x86_sse41_pminud(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE41-LABEL: test_x86_sse41_pminud:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pminud:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pminud:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pminud:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pminud:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -344,20 +314,15 @@ declare <4 x i32> @llvm.x86.sse41.pminud
 
 
 define <8 x i16> @test_x86_sse41_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
-; SSE41-LABEL: test_x86_sse41_pminuw:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_pminuw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_pminuw:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_pminuw:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pminuw:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -365,19 +330,19 @@ declare <8 x i16> @llvm.x86.sse41.pminuw
 
 
 define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
-; SSE41-LABEL: test_x86_sse41_ptestc:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE41-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
-; SSE41-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_ptestc:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; VCHECK-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_ptestc:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_ptestc:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -385,19 +350,19 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x
 
 
 define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) {
-; SSE41-LABEL: test_x86_sse41_ptestnzc:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE41-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
-; SSE41-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_ptestnzc:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; VCHECK-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_ptestnzc:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_ptestnzc:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -405,19 +370,19 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2
 
 
 define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) {
-; SSE41-LABEL: test_x86_sse41_ptestz:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE41-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
-; SSE41-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse41_ptestz:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; VCHECK-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_ptestz:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_ptestz:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
   ret i32 %res
 }
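
As background for the setcc choices in these three tests (an editorial aside, not part of this patch): PTEST sets ZF when the AND of its two operands is all zeros, and CF when the second operand AND NOT the first is all zeros. So ptestz reads ZF via sete, ptestc reads CF via setb, and ptestnzc via seta requires both flags to be clear. A minimal sketch of the usual "is this vector all zeros?" idiom built on ptestz — the function name is illustrative only, not from the tree:

  define i1 @is_all_zeros(<2 x i64> %v) {
    ; ZF is set iff (%v AND %v) == 0, i.e. %v is all zeros
    %z = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %v, <2 x i64> %v)
    %r = icmp ne i32 %z, 0
    ret i1 %r
  }
  declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
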
@@ -425,20 +390,15 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x
 
 
 define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) {
-; SSE41-LABEL: test_x86_sse41_round_pd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_round_pd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_round_pd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vroundpd $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_round_pd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_round_pd:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -446,20 +406,15 @@ declare <2 x double> @llvm.x86.sse41.rou
 
 
 define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) {
-; SSE41-LABEL: test_x86_sse41_round_ps:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_round_ps:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_round_ps:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vroundps $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_round_ps:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_round_ps:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -467,20 +422,20 @@ declare <4 x float> @llvm.x86.sse41.roun
 
 
 define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) {
-; SSE41-LABEL: test_x86_sse41_round_sd:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_round_sd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_round_sd:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_round_sd:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_round_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_round_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -488,23 +443,38 @@ declare <2 x double> @llvm.x86.sse41.rou
 
 
 define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>* %a1) {
-; SSE41-LABEL: test_x86_sse41_round_sd_load:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE41-NEXT:    roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_round_sd_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_round_sd_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse41_round_sd_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: test_x86_sse41_round_sd_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse41_round_sd_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vroundsd $7, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse41_round_sd_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    roundsd $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x07,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: test_x86_sse41_round_sd_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse41_round_sd_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a1b = load <2 x double>, <2 x double>* %a1
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1b, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -512,20 +482,20 @@ define <2 x double> @test_x86_sse41_roun
 
 
 define <4 x float> @test_x86_sse41_round_ss(<4 x float> %a0, <4 x float> %a1) {
-; SSE41-LABEL: test_x86_sse41_round_ss:
-; SSE41:       ## %bb.0:
-; SSE41-NEXT:    roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
-; SSE41-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse41_round_ss:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse41_round_ss:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_round_ss:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_round_ss:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_round_ss:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
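
One note on the $7 immediate used by all of the round tests above (background, not from this patch): bits [1:0] of the roundps/roundpd/roundss/roundsd immediate select the rounding mode (0 nearest-even, 1 toward -inf, 2 toward +inf, 3 toward zero), bit 2 defers to the current MXCSR.RC mode instead, and bit 3 suppresses precision (inexact) exceptions. $7 has bit 2 set, so these tests round per the runtime MXCSR setting. A sketch of a fixed-mode variant — imm 9 is the _MM_FROUND_FLOOR encoding (toward -inf, exceptions suppressed); the function name is illustrative only:

  define <2 x double> @floor_pd_sketch(<2 x double> %a) {
    ; imm 9 = 0b1001: bits [1:0] = 01 round toward -inf, bit 3 = suppress inexact
    %r = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a, i32 9)
    ret <2 x double> %r
  }
  declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
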

Modified: llvm/trunk/test/CodeGen/X86/sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41.ll Sat Jun  2 10:33:26 2018
@@ -1,49 +1,93 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
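
A note on the prefix scheme these RUN lines set up (editorial, not part of the patch): each RUN line lists its FileCheck prefixes from most generic to most specific, and update_llc_test_checks.py emits each CHECK block under the broadest prefix whose RUN lines all produce identical assembly. The ret{{[l|q]}} regex is what lets a single SSE or AVX block match both the 32-bit retl and 64-bit retq encodings, so only tests with genuinely different 32/64-bit codegen (stack loads vs. register arguments, address materialization) fall back to the X86-*/X64-* prefixes. A hypothetical test whose output differs only in the return instruction would check as:

  ; SSE-LABEL: same_both_ways:
  ; SSE:       ## %bb.0:
  ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
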
 
 @g16 = external global i16
 
 define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
-; X32-LABEL: pinsrd_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: pinsrd_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    pinsrd $1, %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: pinsrd_1:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x44,0x24,0x04,0x01]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: pinsrd_1:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: pinsrd_1:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pinsrd $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0xc7,0x01]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: pinsrd_1:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
   ret <4 x i32> %tmp1
 }
 
 define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
-; X32-LABEL: pinsrb_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: pinsrb_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    pinsrb $1, %edi, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: pinsrb_1:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0x44,0x24,0x04,0x01]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: pinsrb_1:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: pinsrb_1:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pinsrb $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0xc7,0x01]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: pinsrb_1:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpinsrb $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
   ret <16 x i8> %tmp1
 }
 
 define <2 x i64> @pmovzxbq_1() nounwind {
-; X32-LABEL: pmovzxbq_1:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl L_g16$non_lazy_ptr, %eax
-; X32-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: pmovzxbq_1:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    movq _g16@{{.*}}(%rip), %rax
-; X64-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X64-NEXT:    retq
+; X86-SSE-LABEL: pmovzxbq_1:
+; X86-SSE:       ## %bb.0: ## %entry
+; X86-SSE-NEXT:    movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
+; X86-SSE-NEXT:    pmovzxbq (%eax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0x00]
+; X86-SSE-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: pmovzxbq_1:
+; X86-AVX:       ## %bb.0: ## %entry
+; X86-AVX-NEXT:    movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
+; X86-AVX-NEXT:    ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
+; X86-AVX-NEXT:    vpmovzxbq (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X86-AVX-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: pmovzxbq_1:
+; X64-SSE:       ## %bb.0: ## %entry
+; X64-SSE-NEXT:    movq _g16@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
+; X64-SSE-NEXT:    pmovzxbq (%rax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0x00]
+; X64-SSE-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: pmovzxbq_1:
+; X64-AVX:       ## %bb.0: ## %entry
+; X64-AVX-NEXT:    movq _g16@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
+; X64-AVX-NEXT:    ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
+; X64-AVX-NEXT:    vpmovzxbq (%rax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X64-AVX-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
 entry:
 	%0 = load i16, i16* @g16, align 2		; <i16> [#uses=1]
 	%1 = insertelement <8 x i16> undef, i16 %0, i32 0		; <<8 x i16>> [#uses=1]
@@ -55,29 +99,39 @@ entry:
 declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
 
 define i32 @extractps_1(<4 x float> %v) nounwind {
-; X32-LABEL: extractps_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    extractps $3, %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: extractps_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    extractps $3, %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: extractps_1:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: extractps_1:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: extractps_1:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %s = extractelement <4 x float> %v, i32 3
   %i = bitcast float %s to i32
   ret i32 %i
 }
 define i32 @extractps_2(<4 x float> %v) nounwind {
-; X32-LABEL: extractps_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    extractps $3, %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: extractps_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    extractps $3, %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: extractps_2:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: extractps_2:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: extractps_2:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %t = bitcast <4 x float> %v to <4 x i32>
   %s = extractelement <4 x i32> %t, i32 3
   ret i32 %s
@@ -89,68 +143,148 @@ define i32 @extractps_2(<4 x float> %v)
 ; is bitcasted to i32, but unsuitable for much of anything else.
 
 define float @ext_1(<4 x float> %v) nounwind {
-; X32-LABEL: ext_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X32-NEXT:    addss LCPI5_0, %xmm0
-; X32-NEXT:    movss %xmm0, (%esp)
-; X32-NEXT:    flds (%esp)
-; X32-NEXT:    popl %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: ext_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X64-NEXT:    addss {{.*}}(%rip), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: ext_1:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-SSE-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-SSE-NEXT:    addss LCPI5_0, %xmm0 ## encoding: [0xf3,0x0f,0x58,0x05,A,A,A,A]
+; X86-SSE-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
+; X86-SSE-NEXT:    movss %xmm0, (%esp) ## encoding: [0xf3,0x0f,0x11,0x04,0x24]
+; X86-SSE-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-SSE-NEXT:    popl %eax ## encoding: [0x58]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: ext_1:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-AVX1-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-AVX1-NEXT:    vaddss LCPI5_0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; X86-AVX1-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-AVX1-NEXT:    popl %eax ## encoding: [0x58]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: ext_1:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-AVX512-NEXT:    vaddss LCPI5_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    vmovss %xmm0, (%esp) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; X86-AVX512-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-AVX512-NEXT:    popl %eax ## encoding: [0x58]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: ext_1:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-SSE-NEXT:    addss {{.*}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x58,0x05,A,A,A,A]
+; X64-SSE-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: ext_1:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-AVX1-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: ext_1:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %s = extractelement <4 x float> %v, i32 3
   %t = fadd float %s, 1.0
   ret float %t
 }
 
 define float @ext_2(<4 x float> %v) nounwind {
-; X32-LABEL: ext_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X32-NEXT:    movss %xmm0, (%esp)
-; X32-NEXT:    flds (%esp)
-; X32-NEXT:    popl %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: ext_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: ext_2:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-SSE-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-SSE-NEXT:    movss %xmm0, (%esp) ## encoding: [0xf3,0x0f,0x11,0x04,0x24]
+; X86-SSE-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-SSE-NEXT:    popl %eax ## encoding: [0x58]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: ext_2:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-AVX1-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; X86-AVX1-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-AVX1-NEXT:    popl %eax ## encoding: [0x58]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: ext_2:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    pushl %eax ## encoding: [0x50]
+; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X86-AVX512-NEXT:    vmovss %xmm0, (%esp) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; X86-AVX512-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; X86-AVX512-NEXT:    popl %eax ## encoding: [0x58]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: ext_2:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: ext_2:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %s = extractelement <4 x float> %v, i32 3
   ret float %s
 }
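
For readers decoding the shuffle immediates in these checks: the shufps/pshufd/vpermilps immediate is four 2-bit source-element selectors, low bits first (for the single-input shufps/vpermilps uses above, both operands are the same register, so it is a plain permute). Worked out for the $231 used in ext_1 and ext_2 — this just expands the autogenerated lane comments:

  231 = 0xe7 = 0b11'10'01'11
    bits [1:0] = 3 -> result[0] = src[3]
    bits [3:2] = 1 -> result[1] = src[1]
    bits [5:4] = 2 -> result[2] = src[2]
    bits [7:6] = 3 -> result[3] = src[3]    => xmm0[3,1,2,3]
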
 
 define i32 @ext_3(<4 x i32> %v) nounwind {
-; X32-LABEL: ext_3:
-; X32:       ## %bb.0:
-; X32-NEXT:    extractps $3, %xmm0, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: ext_3:
-; X64:       ## %bb.0:
-; X64-NEXT:    extractps $3, %xmm0, %eax
-; X64-NEXT:    retq
+; SSE-LABEL: ext_3:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: ext_3:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %i = extractelement <4 x i32> %v, i32 3
   ret i32 %i
 }
 
 define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
-; X32-LABEL: insertps_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_1:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $21, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x15]
+; SSE-NEXT:    ## xmm0 = zero,xmm1[0],zero,xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_1:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $21, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x15]
+; AVX1-NEXT:    ## xmm0 = zero,xmm1[0],zero,xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_1:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $21, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x15]
+; AVX512-NEXT:    ## xmm0 = zero,xmm1[0],zero,xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 21) nounwind readnone
   ret <4 x float> %tmp1
 }
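
The insertps immediate packs three fields: bits [7:6] (count_s) pick the source element, bits [5:4] (count_d) pick the destination lane, and bits [3:0] (zmask) zero destination lanes. Decoding the $21 in insertps_1 — a worked expansion of the generated comment, not new behavior:

  21 = 0b00'01'0101
    count_s = 0    -> take xmm1[0]
    count_d = 1    -> write it to lane 1
    zmask = 0101   -> zero lanes 0 and 2    => zero,xmm1[0],zero,xmm0[3]

The same decoding covers the other immediates in this file, e.g. $48 = 0b00'11'0000 writes xmm1[0] to lane 3 with no zeroing, giving xmm0[0,1,2],xmm1[0].
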
@@ -160,34 +294,96 @@ declare <4 x float> @llvm.x86.sse41.inse
 ; When optimizing for speed, prefer blendps over insertps even if it means we have to
 ; generate a separate movss to load the scalar operand.
 define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
-; X32-LABEL: blendps_not_insertps_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: blendps_not_insertps_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: blendps_not_insertps_1:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
+; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: blendps_not_insertps_1:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: blendps_not_insertps_1:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
+; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: blendps_not_insertps_1:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
+; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: blendps_not_insertps_1:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: blendps_not_insertps_1:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
   ret <4 x float> %tmp1
 }
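
blendps reads its immediate one bit per lane — a set bit takes that lane from the source register, a clear bit keeps the destination lane — so the $1 above is simply:

  blendps $1, %xmm1, %xmm0
    bit 0 set      -> result[0] = xmm1[0]
    bits 1-3 clear -> result[1..3] = xmm0[1..3]    => xmm1[0],xmm0[1,2,3]

The speed preference the comment describes is plausible because blends typically issue on more execution ports than insertps, which competes for the shuffle unit — a hedged rationale, not something this patch states.
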
 
 ; When optimizing for size, generate an insertps if there's a load fold opportunity.
 ; The difference between i386 and x86-64 ABIs for the float operand means we should
-; generate an insertps for X32 but not for X64!
+; generate an insertps for X86 but not for X64!
 define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
-; X32-LABEL: insertps_or_blendps:
-; X32:       ## %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_or_blendps:
-; X64:       ## %bb.0:
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_or_blendps:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
+; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_or_blendps:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_or_blendps:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
+; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_or_blendps:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
+; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_or_blendps:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_or_blendps:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
   ret <4 x float> %tmp1
 }
@@ -195,70 +391,78 @@ define <4 x float> @insertps_or_blendps(
 ; An insert into the low 32-bits of a vector from the low 32-bits of another vector
 ; is always just a blendps because blendps is never more expensive than insertps.
 define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
-; X32-LABEL: blendps_not_insertps_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: blendps_not_insertps_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X64-NEXT:    retq
+; SSE-LABEL: blendps_not_insertps_2:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
+; SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: blendps_not_insertps_2:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: blendps_not_insertps_2:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
+; AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %tmp2 = extractelement <4 x float> %t2, i32 0
   %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
   ret <4 x float> %tmp1
 }
 
 define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
-; X32-LABEL: ptestz_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    sete %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: ptestz_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    sete %al
-; X64-NEXT:    retq
+; SSE-LABEL: ptestz_1:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: ptestz_1:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %tmp1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
   ret i32 %tmp1
 }
 
 define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
-; X32-LABEL: ptestz_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    setb %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: ptestz_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    setb %al
-; X64-NEXT:    retq
+; SSE-LABEL: ptestz_2:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: ptestz_2:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
   ret i32 %tmp1
 }
 
 define i32 @ptestz_3(<2 x i64> %t1, <2 x i64> %t2) nounwind {
-; X32-LABEL: ptestz_3:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    ptest %xmm1, %xmm0
-; X32-NEXT:    seta %al
-; X32-NEXT:    retl
-;
-; X64-LABEL: ptestz_3:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    ptest %xmm1, %xmm0
-; X64-NEXT:    seta %al
-; X64-NEXT:    retq
+; SSE-LABEL: ptestz_3:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
+; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: ptestz_3:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
+; AVX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
   ret i32 %tmp1
 }
@@ -270,23 +474,41 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2
 ; This used to compile to insertps $0  + insertps $16.  insertps $0 is always
 ; pointless.
 define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind  {
-; X32-LABEL: buildvector:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X32-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X32-NEXT:    addss %xmm2, %xmm3
-; X32-NEXT:    addss %xmm1, %xmm0
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: buildvector:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X64-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X64-NEXT:    addss %xmm2, %xmm3
-; X64-NEXT:    addss %xmm1, %xmm0
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
-; X64-NEXT:    retq
+; SSE-LABEL: buildvector:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    movshdup %xmm0, %xmm2 ## encoding: [0xf3,0x0f,0x16,0xd0]
+; SSE-NEXT:    ## xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT:    movshdup %xmm1, %xmm3 ## encoding: [0xf3,0x0f,0x16,0xd9]
+; SSE-NEXT:    ## xmm3 = xmm1[1,1,3,3]
+; SSE-NEXT:    addss %xmm2, %xmm3 ## encoding: [0xf3,0x0f,0x58,0xda]
+; SSE-NEXT:    addss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x58,0xc1]
+; SSE-NEXT:    insertps $16, %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc3,0x10]
+; SSE-NEXT:    ## xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: buildvector:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vmovshdup %xmm0, %xmm2 ## encoding: [0xc5,0xfa,0x16,0xd0]
+; AVX1-NEXT:    ## xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vmovshdup %xmm1, %xmm3 ## encoding: [0xc5,0xfa,0x16,0xd9]
+; AVX1-NEXT:    ## xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vaddss %xmm3, %xmm2, %xmm2 ## encoding: [0xc5,0xea,0x58,0xd3]
+; AVX1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0xc1]
+; AVX1-NEXT:    vinsertps $16, %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc2,0x10]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: buildvector:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vmovshdup %xmm0, %xmm2 ## encoding: [0xc5,0xfa,0x16,0xd0]
+; AVX512-NEXT:    ## xmm2 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vmovshdup %xmm1, %xmm3 ## encoding: [0xc5,0xfa,0x16,0xd9]
+; AVX512-NEXT:    ## xmm3 = xmm1[1,1,3,3]
+; AVX512-NEXT:    vaddss %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x58,0xd3]
+; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0xc1]
+; AVX512-NEXT:    vinsertps $16, %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc2,0x10]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %tmp7 = extractelement <2 x float> %A, i32 0
   %tmp5 = extractelement <2 x float> %A, i32 1
@@ -300,16 +522,50 @@ entry:
 }
 
 define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
-; X32-LABEL: insertps_from_shufflevector_1:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_shufflevector_1:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_shufflevector_1:
+; X86-SSE:       ## %bb.0: ## %entry
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
+; X86-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_shufflevector_1:
+; X86-AVX1:       ## %bb.0: ## %entry
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_shufflevector_1:
+; X86-AVX512:       ## %bb.0: ## %entry
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_shufflevector_1:
+; X64-SSE:       ## %bb.0: ## %entry
+; X64-SSE-NEXT:    movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
+; X64-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_shufflevector_1:
+; X64-AVX1:       ## %bb.0: ## %entry
+; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_shufflevector_1:
+; X64-AVX512:       ## %bb.0: ## %entry
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
 entry:
   %0 = load <4 x float>, <4 x float>* %pb, align 16
   %vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
@@ -317,15 +573,23 @@ entry:
 }
 
 define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
-; X32-LABEL: insertps_from_shufflevector_2:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_shufflevector_2:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_from_shufflevector_2:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_from_shufflevector_2:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_from_shufflevector_2:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %vecinit6 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
   ret <4 x float> %vecinit6
@@ -334,18 +598,54 @@ entry:
 ; For loading an i32 from memory into an xmm register we use pinsrd
 ; instead of insertps
 define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
-; X32-LABEL: pinsrd_from_shufflevector_i32:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: pinsrd_from_shufflevector_i32:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: pinsrd_from_shufflevector_i32:
+; X86-SSE:       ## %bb.0: ## %entry
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    pshufd $36, (%eax), %xmm1 ## encoding: [0x66,0x0f,0x70,0x08,0x24]
+; X86-SSE-NEXT:    ## xmm1 = mem[0,1,2,0]
+; X86-SSE-NEXT:    pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: pinsrd_from_shufflevector_i32:
+; X86-AVX1:       ## %bb.0: ## %entry
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vpermilps $36, (%eax), %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0x08,0x24]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0,1,2,0]
+; X86-AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: pinsrd_from_shufflevector_i32:
+; X86-AVX512:       ## %bb.0: ## %entry
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x08]
+; X86-AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: pinsrd_from_shufflevector_i32:
+; X64-SSE:       ## %bb.0: ## %entry
+; X64-SSE-NEXT:    pshufd $36, (%rdi), %xmm1 ## encoding: [0x66,0x0f,0x70,0x0f,0x24]
+; X64-SSE-NEXT:    ## xmm1 = mem[0,1,2,0]
+; X64-SSE-NEXT:    pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: pinsrd_from_shufflevector_i32:
+; X64-AVX1:       ## %bb.0: ## %entry
+; X64-AVX1-NEXT:    vpermilps $36, (%rdi), %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0x0f,0x24]
+; X64-AVX1-NEXT:    ## xmm1 = mem[0,1,2,0]
+; X64-AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: pinsrd_from_shufflevector_i32:
+; X64-AVX512:       ## %bb.0: ## %entry
+; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0f]
+; X64-AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
 entry:
   %0 = load <4 x i32>, <4 x i32>* %pb, align 16
   %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
@@ -353,33 +653,65 @@ entry:
 }
 
 define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
-; X32-LABEL: insertps_from_shufflevector_i32_2:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_shufflevector_i32_2:
-; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_from_shufflevector_i32_2:
+; SSE:       ## %bb.0: ## %entry
+; SSE-NEXT:    pshufd $78, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0x4e]
+; SSE-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; SSE-NEXT:    pblendw $12, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x0c]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: insertps_from_shufflevector_i32_2:
+; AVX:       ## %bb.0: ## %entry
+; AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
   ret <4 x i32> %vecinit6
 }
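
The SSE and AVX sequences above express the same merge at different granularities (a decoding aside, not part of the patch): pshufd/vpermilps $78 = 0b01'00'11'10 rotates the source to [2,3,0,1], so element 3 lands in lane 1, and the blend then pulls just that lane:

  pblendw  $12 = 0b00001100 -> 16-bit words 2-3 from %xmm1, i.e. dword lane 1
  vblendps $2  = 0b0010     -> 32-bit lane 1 from %xmm1

Both leave every other lane of %xmm0 intact.
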
 
 define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
-; X32-LABEL: insertps_from_load_ins_elt_undef:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_load_ins_elt_undef:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_load_ins_elt_undef:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    insertps $16, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x00,0x10]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_load_ins_elt_undef:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vinsertps $16, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x00,0x10]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_load_ins_elt_undef:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vinsertps $16, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x00,0x10]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_load_ins_elt_undef:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    insertps $16, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x07,0x10]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_load_ins_elt_undef:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vinsertps $16, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x07,0x10]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_load_ins_elt_undef:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vinsertps $16, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x07,0x10]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load float, float* %b, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
@@ -388,16 +720,27 @@ define <4 x float> @insertps_from_load_i
 
 ; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
 define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
-; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pinsrd $2, (%eax), %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X64:       ## %bb.0:
-; X64-NEXT:    pinsrd $2, (%rdi), %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    pinsrd $2, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x00,0x02]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vpinsrd $2, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pinsrd $2, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x07,0x02]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpinsrd $2, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %1 = load i32, i32* %b, align 4
   %2 = insertelement <4 x i32> undef, i32 %1, i32 0
   %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
@@ -406,17 +749,19 @@ define <4 x i32> @insertps_from_load_ins
 
 ;;;;;; Shuffles optimizable with a single insertps or blend instruction
 define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_XYZ0:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_XYZ0:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm1
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_XYZ0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
+; SSE-NEXT:    blendps $8, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x08]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: shuf_XYZ0:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -428,15 +773,23 @@ define <4 x float> @shuf_XYZ0(<4 x float
 }
 
 define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_XY00:
-; X32:       ## %bb.0:
-; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_XY00:
-; X64:       ## %bb.0:
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_XY00:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x7e,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_XY00:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmovq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_XY00:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -447,15 +800,23 @@ define <4 x float> @shuf_XY00(<4 x float
 }
 
 define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_XYY0:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_XYY0:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_XYY0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $104, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x68]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,1],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_XYY0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $104, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x68]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,1],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_XYY0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $104, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x68]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,1],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -466,15 +827,23 @@ define <4 x float> @shuf_XYY0(<4 x float
 }
 
 define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_XYW0:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_XYW0:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_XYW0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $232, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0xe8]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,3],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_XYW0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $232, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xe8]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,3],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_XYW0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $232, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xe8]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,3],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -486,15 +855,23 @@ define <4 x float> @shuf_XYW0(<4 x float
 }
 
 define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_W00W:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_W00W:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_W00W:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $198, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0xc6]
+; SSE-NEXT:    ## xmm0 = xmm0[3],zero,zero,xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_W00W:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $198, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xc6]
+; AVX1-NEXT:    ## xmm0 = xmm0[3],zero,zero,xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_W00W:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $198, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xc6]
+; AVX512-NEXT:    ## xmm0 = xmm0[3],zero,zero,xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 3
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit2 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -504,15 +881,23 @@ define <4 x float> @shuf_W00W(<4 x float
 }
 
 define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_X00A:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_X00A:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_X00A:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $54, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x36]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm1[0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_X00A:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $54, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x36]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm1[0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_X00A:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $54, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x36]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm1[0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -522,15 +907,23 @@ define <4 x float> @shuf_X00A(<4 x float
 }
 
 define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_X00X:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_X00X:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_X00X:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $54, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x36]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm0[0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_X00X:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $54, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x36]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm0[0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_X00X:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $54, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x36]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,zero,xmm0[0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -540,19 +933,32 @@ define <4 x float> @shuf_X00X(<4 x float
 }
 
 define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
-; X32-LABEL: shuf_X0YC:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm2, %xmm2
-; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
-; X32-NEXT:    retl
-;
-; X64-LABEL: shuf_X0YC:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm2, %xmm2
-; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
-; X64-NEXT:    retq
+; SSE-LABEL: shuf_X0YC:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm2, %xmm2 ## encoding: [0x0f,0x57,0xd2]
+; SSE-NEXT:    unpcklps %xmm2, %xmm0 ## encoding: [0x0f,0x14,0xc2]
+; SSE-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT:    insertps $176, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb0]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[2]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: shuf_X0YC:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX1-NEXT:    vunpcklps %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x14,0xc2]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX1-NEXT:    vinsertps $176, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[2]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_X0YC:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX512-NEXT:    vunpcklps %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x14,0xc2]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512-NEXT:    vinsertps $176, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[2]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -562,17 +968,19 @@ define <4 x float> @shuf_X0YC(<4 x float
 }
 
 define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_XYZ0:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_XYZ0:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm1
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_XYZ0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
+; SSE-NEXT:    blendps $8, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x08]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: i32_shuf_XYZ0:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -584,15 +992,23 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_XY00:
-; X32:       ## %bb.0:
-; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_XY00:
-; X64:       ## %bb.0:
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_XY00:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x7e,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: i32_shuf_XY00:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vmovq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_XY00:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -603,19 +1019,23 @@ define <4 x i32> @i32_shuf_XY00(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_XYY0:
-; X32:       ## %bb.0:
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; X32-NEXT:    pxor %xmm0, %xmm0
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_XYY0:
-; X64:       ## %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; X64-NEXT:    pxor %xmm0, %xmm0
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_XYY0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pshufd $212, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xd4]
+; SSE-NEXT:    ## xmm1 = xmm0[0,1,1,3]
+; SSE-NEXT:    pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
+; SSE-NEXT:    pblendw $63, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3f]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: i32_shuf_XYY0:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpermilps $212, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,1,3]
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -626,19 +1046,23 @@ define <4 x i32> @i32_shuf_XYY0(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_XYW0:
-; X32:       ## %bb.0:
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
-; X32-NEXT:    pxor %xmm0, %xmm0
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_XYW0:
-; X64:       ## %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
-; X64-NEXT:    pxor %xmm0, %xmm0
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_XYW0:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pshufd $244, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xf4]
+; SSE-NEXT:    ## xmm1 = xmm0[0,1,3,3]
+; SSE-NEXT:    pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
+; SSE-NEXT:    pblendw $63, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3f]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: i32_shuf_XYW0:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpermilps $244, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,3,3]
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -650,19 +1074,23 @@ define <4 x i32> @i32_shuf_XYW0(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_W00W:
-; X32:       ## %bb.0:
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X32-NEXT:    pxor %xmm0, %xmm0
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_W00W:
-; X64:       ## %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X64-NEXT:    pxor %xmm0, %xmm0
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_W00W:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pshufd $231, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xe7]
+; SSE-NEXT:    ## xmm1 = xmm0[3,1,2,3]
+; SSE-NEXT:    pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
+; SSE-NEXT:    pblendw $195, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc3]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: i32_shuf_W00W:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; AVX-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
+; AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 3
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -672,21 +1100,37 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_X00A:
-; X32:       ## %bb.0:
-; X32-NEXT:    pxor %xmm2, %xmm2
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_X00A:
-; X64:       ## %bb.0:
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_X00A:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2 ## encoding: [0x66,0x0f,0xef,0xd2]
+; SSE-NEXT:    pblendw $252, %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc2,0xfc]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; SSE-NEXT:    pshufd $36, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0x24]
+; SSE-NEXT:    ## xmm1 = xmm1[0,1,2,0]
+; SSE-NEXT:    pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: i32_shuf_X00A:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX1-NEXT:    vblendps $1, %xmm0, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x0c,0xc0,0x01]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm2[1,2,3]
+; AVX1-NEXT:    vpermilps $36, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x24]
+; AVX1-NEXT:    ## xmm1 = xmm1[0,1,2,0]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_X00A:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX512-NEXT:    vmovss %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x10,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm2[1,2,3]
+; AVX512-NEXT:    vbroadcastss %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -696,19 +1140,31 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_X00X:
-; X32:       ## %bb.0:
-; X32-NEXT:    pxor %xmm1, %xmm1
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_X00X:
-; X64:       ## %bb.0:
-; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_X00X:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pxor %xmm1, %xmm1 ## encoding: [0x66,0x0f,0xef,0xc9]
+; SSE-NEXT:    pshufd $36, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc0,0x24]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1,2,0]
+; SSE-NEXT:    pblendw $60, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3c]
+; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: i32_shuf_X00X:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vpermilps $36, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x24]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2,0]
+; AVX1-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_X00X:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x18,0xc0]
+; AVX512-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -718,19 +1174,35 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32
 }
 
 define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
-; X32-LABEL: i32_shuf_X0YC:
-; X32:       ## %bb.0:
-; X32-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
-; X32-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: i32_shuf_X0YC:
-; X64:       ## %bb.0:
-; X64-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
-; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
-; X64-NEXT:    retq
+; SSE-LABEL: i32_shuf_X0YC:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pmovzxdq %xmm0, %xmm2 ## encoding: [0x66,0x0f,0x38,0x35,0xd0]
+; SSE-NEXT:    ## xmm2 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    pshufd $164, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc1,0xa4]
+; SSE-NEXT:    ## xmm0 = xmm1[0,1,2,2]
+; SSE-NEXT:    pblendw $63, %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc2,0x3f]
+; SSE-NEXT:    ## xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: i32_shuf_X0YC:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT:    vpshufd $164, %xmm1, %xmm1 ## encoding: [0xc5,0xf9,0x70,0xc9,0xa4]
+; AVX1-NEXT:    ## xmm1 = xmm1[0,1,2,2]
+; AVX1-NEXT:    vpblendw $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc1,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_X0YC:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512-NEXT:    vpshufd $164, %xmm1, %xmm1 ## encoding: [0xc5,0xf9,0x70,0xc9,0xa4]
+; AVX512-NEXT:    ## xmm1 = xmm1[0,1,2,2]
+; AVX512-NEXT:    vpblendd $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x02,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -739,21 +1211,23 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32
   ret <4 x i32> %vecinit5
 }
 
 ;; Test for a bug in the first implementation of LowerBuildVectorv4x32
 define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
-; X32-LABEL: test_insertps_no_undef:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
-; X32-NEXT:    maxps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_insertps_no_undef:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm1
-; X64-NEXT:    blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
-; X64-NEXT:    maxps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: test_insertps_no_undef:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
+; SSE-NEXT:    blendps $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc8,0x07]
+; SSE-NEXT:    ## xmm1 = xmm0[0,1,2],xmm1[3]
+; SSE-NEXT:    maxps %xmm1, %xmm0 ## encoding: [0x0f,0x5f,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_insertps_no_undef:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
+; AVX-NEXT:    ## xmm1 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -767,37 +1241,70 @@ define < 4 x float> @test_insertps_no_un
 }
 
 define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
-; X32-LABEL: blendvb_fallback:
-; X32:       ## %bb.0:
-; X32-NEXT:    psllw $15, %xmm0
-; X32-NEXT:    psraw $15, %xmm0
-; X32-NEXT:    pblendvb %xmm0, %xmm1, %xmm2
-; X32-NEXT:    movdqa %xmm2, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: blendvb_fallback:
-; X64:       ## %bb.0:
-; X64-NEXT:    psllw $15, %xmm0
-; X64-NEXT:    psraw $15, %xmm0
-; X64-NEXT:    pblendvb %xmm0, %xmm1, %xmm2
-; X64-NEXT:    movdqa %xmm2, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: blendvb_fallback:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    psllw $15, %xmm0 ## encoding: [0x66,0x0f,0x71,0xf0,0x0f]
+; SSE-NEXT:    psraw $15, %xmm0 ## encoding: [0x66,0x0f,0x71,0xe0,0x0f]
+; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm2 ## encoding: [0x66,0x0f,0x38,0x10,0xd1]
+; SSE-NEXT:    movdqa %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc2]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: blendvb_fallback:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpsllw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
+; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x0f]
+; AVX-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x4c,0xc1,0x00]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
   ret <8 x i16> %ret
 }
 
-; On X32, account for the argument's move to registers
+; On X86, account for the argument's move to registers
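 ; (On i386 every argument is passed on the stack, so each X86 check below
 ; first loads the pointer argument: movl {{[0-9]+}}(%esp), %eax.)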
 define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
-; X32-LABEL: insertps_from_vector_load:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_vector_load:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_vector_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
+; X86-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_vector_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_vector_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_vector_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
+; X64-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_vector_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_vector_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
   ret <4 x float> %2
@@ -806,16 +1313,50 @@ define <4 x float> @insertps_from_vector
 ;; Use a non-zero CountS for insertps
 ;; Try to match a bit more of the instr, since we need the load's offset.
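 ;; (Reference sketch of the insertps imm8, per the Intel SDM: bits [7:6] pick
 ;; the source element (CountS), bits [5:4] the destination element (CountD),
 ;; and bits [3:0] zero result elements; the memory form treats CountS as 0.
 ;; Here imm 96 = 0b01100000 inserts source element 1 into destination element
 ;; 2, so folding the load would require offsetting the address, hence the
 ;; separate movaps in the checks below.)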
 define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
-; X32-LABEL: insertps_from_vector_load_offset:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_vector_load_offset:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_vector_load_offset:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
+; X86-SSE-NEXT:    insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_vector_load_offset:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX1-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_vector_load_offset:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_vector_load_offset:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
+; X64-SSE-NEXT:    insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_vector_load_offset:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX1-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_vector_load_offset:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
   ret <4 x float> %2
@@ -823,19 +1364,59 @@ define <4 x float> @insertps_from_vector
 
 ;; Try to match a bit more of the instr, since we need the load's offset.
 define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
-; X32-LABEL: insertps_from_vector_load_offset_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    shll $4, %ecx
-; X32-NEXT:    insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_vector_load_offset_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    shlq $4, %rsi
-; X64-NEXT:    insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_vector_load_offset_2:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-SSE-NEXT:    shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
+; X86-SSE-NEXT:    movaps (%eax,%ecx), %xmm1 ## encoding: [0x0f,0x28,0x0c,0x08]
+; X86-SSE-NEXT:    insertps $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xc0]
+; X86-SSE-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_vector_load_offset_2:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-AVX1-NEXT:    shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
+; X86-AVX1-NEXT:    vmovaps (%eax,%ecx), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x08]
+; X86-AVX1-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_vector_load_offset_2:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-AVX512-NEXT:    shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
+; X86-AVX512-NEXT:    vmovaps (%eax,%ecx), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x08]
+; X86-AVX512-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_vector_load_offset_2:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
+; X64-SSE-NEXT:    movaps (%rdi,%rsi), %xmm1 ## encoding: [0x0f,0x28,0x0c,0x37]
+; X64-SSE-NEXT:    insertps $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xc0]
+; X64-SSE-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_vector_load_offset_2:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
+; X64-AVX1-NEXT:    vmovaps (%rdi,%rsi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x37]
+; X64-AVX1-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_vector_load_offset_2:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
+; X64-AVX512-NEXT:    vmovaps (%rdi,%rsi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x37]
+; X64-AVX512-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
   %2 = load <4 x float>, <4 x float>* %1, align 16
   %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
@@ -843,17 +1424,49 @@ define <4 x float> @insertps_from_vector
 }
 
 define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
-; X32-LABEL: insertps_from_broadcast_loadf32:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_broadcast_loadf32:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_broadcast_loadf32:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    insertps $48, (%ecx,%eax,4), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x04,0x81,0x30]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_broadcast_loadf32:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX1-NEXT:    vinsertps $48, (%ecx,%eax,4), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x04,0x81,0x30]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_broadcast_loadf32:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0c,0x81]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_broadcast_loadf32:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    insertps $48, (%rdi,%rsi,4), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x04,0xb7,0x30]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_broadcast_loadf32:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vinsertps $48, (%rdi,%rsi,4), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x04,0xb7,0x30]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_broadcast_loadf32:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0c,0xb7]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = getelementptr inbounds float, float* %fb, i64 %index
   %2 = load float, float* %1, align 4
   %3 = insertelement <4 x float> undef, float %2, i32 0
@@ -865,16 +1478,48 @@ define <4 x float> @insertps_from_broadc
 }
 
 define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
-; X32-LABEL: insertps_from_broadcast_loadv4f32:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_broadcast_loadv4f32:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_broadcast_loadv4f32:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movups (%eax), %xmm1 ## encoding: [0x0f,0x10,0x08]
+; X86-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_broadcast_loadv4f32:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vinsertps $48, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x00,0x30]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x08]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_broadcast_loadv4f32:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movups (%rdi), %xmm1 ## encoding: [0x0f,0x10,0x0f]
+; X64-SSE-NEXT:    insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_broadcast_loadv4f32:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vinsertps $48, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x07,0x30]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0f]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load <4 x float>, <4 x float>* %b, align 4
   %2 = extractelement <4 x float> %1, i32 0
   %3 = insertelement <4 x float> undef, float %2, i32 0
@@ -886,31 +1531,109 @@ define <4 x float> @insertps_from_broadc
 }
 
 define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
-; X32-LABEL: insertps_from_broadcast_multiple_use:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
-; X32-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; X32-NEXT:    addps %xmm1, %xmm0
-; X32-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
-; X32-NEXT:    insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; X32-NEXT:    addps %xmm2, %xmm3
-; X32-NEXT:    addps %xmm3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_from_broadcast_multiple_use:
-; X64:       ## %bb.0:
-; X64-NEXT:    movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
-; X64-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; X64-NEXT:    addps %xmm1, %xmm0
-; X64-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
-; X64-NEXT:    insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; X64-NEXT:    addps %xmm2, %xmm3
-; X64-NEXT:    addps %xmm3, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_from_broadcast_multiple_use:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    movss (%ecx,%eax,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0x81]
+; X86-SSE-NEXT:    ## xmm4 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X86-SSE-NEXT:    insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
+; X86-SSE-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X86-SSE-NEXT:    addps %xmm1, %xmm0 ## encoding: [0x0f,0x58,0xc1]
+; X86-SSE-NEXT:    insertps $48, %xmm4, %xmm2 ## encoding: [0x66,0x0f,0x3a,0x21,0xd4,0x30]
+; X86-SSE-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
+; X86-SSE-NEXT:    insertps $48, %xmm4, %xmm3 ## encoding: [0x66,0x0f,0x3a,0x21,0xdc,0x30]
+; X86-SSE-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
+; X86-SSE-NEXT:    addps %xmm2, %xmm3 ## encoding: [0x0f,0x58,0xda]
+; X86-SSE-NEXT:    addps %xmm3, %xmm0 ## encoding: [0x0f,0x58,0xc3]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_from_broadcast_multiple_use:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX1-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
+; X86-AVX1-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X86-AVX1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm1 ## encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
+; X86-AVX1-NEXT:    ## xmm1 = xmm2[0,1,2],xmm4[0]
+; X86-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm2 ## encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
+; X86-AVX1-NEXT:    ## xmm2 = xmm3[0,1,2],xmm4[0]
+; X86-AVX1-NEXT:    vaddps %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x58,0xca]
+; X86-AVX1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_broadcast_multiple_use:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
+; X86-AVX512-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xd4,0x30]
+; X86-AVX512-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xdc,0x30]
+; X86-AVX512-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX512-NEXT:    vaddps %xmm3, %xmm2, %xmm1 ## encoding: [0xc5,0xe8,0x58,0xcb]
+; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_from_broadcast_multiple_use:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movss (%rdi,%rsi,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0xb7]
+; X64-SSE-NEXT:    ## xmm4 = mem[0],zero,zero,zero
+; X64-SSE-NEXT:    insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X64-SSE-NEXT:    insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
+; X64-SSE-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X64-SSE-NEXT:    addps %xmm1, %xmm0 ## encoding: [0x0f,0x58,0xc1]
+; X64-SSE-NEXT:    insertps $48, %xmm4, %xmm2 ## encoding: [0x66,0x0f,0x3a,0x21,0xd4,0x30]
+; X64-SSE-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
+; X64-SSE-NEXT:    insertps $48, %xmm4, %xmm3 ## encoding: [0x66,0x0f,0x3a,0x21,0xdc,0x30]
+; X64-SSE-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
+; X64-SSE-NEXT:    addps %xmm2, %xmm3 ## encoding: [0x0f,0x58,0xda]
+; X64-SSE-NEXT:    addps %xmm3, %xmm0 ## encoding: [0x0f,0x58,0xc3]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_from_broadcast_multiple_use:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
+; X64-AVX1-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X64-AVX1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm1 ## encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
+; X64-AVX1-NEXT:    ## xmm1 = xmm2[0,1,2],xmm4[0]
+; X64-AVX1-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm2 ## encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
+; X64-AVX1-NEXT:    ## xmm2 = xmm3[0,1,2],xmm4[0]
+; X64-AVX1-NEXT:    vaddps %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x58,0xca]
+; X64-AVX1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_broadcast_multiple_use:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
+; X64-AVX512-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xd4,0x30]
+; X64-AVX512-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xdc,0x30]
+; X64-AVX512-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX512-NEXT:    vaddps %xmm3, %xmm2, %xmm1 ## encoding: [0xc5,0xe8,0x58,0xcb]
+; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = getelementptr inbounds float, float* %fb, i64 %index
   %2 = load float, float* %1, align 4
   %3 = insertelement <4 x float> undef, float %2, i32 0
@@ -928,20 +1651,58 @@ define <4 x float> @insertps_from_broadc
 }
 
 define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
-; X32-LABEL: insertps_with_undefs:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; X32-NEXT:    movaps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_with_undefs:
-; X64:       ## %bb.0:
-; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; X64-NEXT:    movaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_with_undefs:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movss (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x08]
+; X86-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; X86-SSE-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; X86-SSE-NEXT:    movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: insertps_with_undefs:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovss (%eax), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x08]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X86-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_with_undefs:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
+; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_with_undefs:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movss (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x0f]
+; X64-SSE-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; X64-SSE-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT:    movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: insertps_with_undefs:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovss (%rdi), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x0f]
+; X64-AVX1-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X64-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_with_undefs:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
+; X64-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load float, float* %b, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
@@ -951,16 +1712,50 @@ define <4 x float> @insertps_with_undefs
 ; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
 ; the destination index to change the load, instead of the source index.
 define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
-; X32-LABEL: pr20087:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: pr20087:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[0]
-; X64-NEXT:    retq
+; X86-SSE-LABEL: pr20087:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
+; X86-SSE-NEXT:    insertps $178, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb2]
+; X86-SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX1-LABEL: pr20087:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX1-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: pr20087:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: pr20087:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
+; X64-SSE-NEXT:    insertps $178, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb2]
+; X64-SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX1-LABEL: pr20087:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX1-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: pr20087:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %load = load <4 x float> , <4 x float> *%ptr
   %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
   ret <4 x float> %ret
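
A note on what this test pins down (my hedged paraphrase of the referenced fix, not text from the commit): the insertps immediate carries two distinct index fields, and when the second source is folded from memory the element loaded must be selected by the source field, not by the destination slot. An approximate C counterpart of @pr20087, assuming the usual <smmintrin.h> intrinsics:

#include <smmintrin.h>

/* 0xB2 ($178 above) = 10 11 0010b: source element 2 (bits 7:6) into
   destination slot 3 (bits 5:4), zeroing lane 1 (bits 3:0) -- the two
   index fields the old getINSERTPS code conflated. */
__m128 pr20087_demo(__m128 a, const __m128 *ptr) {
  return _mm_insert_ps(a, *ptr, 0xB2);
}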
@@ -968,20 +1763,43 @@ define <4 x float> @pr20087(<4 x float>
 
 ; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
 define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32* noalias nocapture %RET) #1 {
-; X32-LABEL: insertps_pr20411:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X32-NEXT:    movdqu %xmm1, (%eax)
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_pr20411:
-; X64:       ## %bb.0:
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X64-NEXT:    movdqu %xmm1, (%rdi)
-; X64-NEXT:    retq
+; X86-SSE-LABEL: insertps_pr20411:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    pshufd $78, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0x4e]
+; X86-SSE-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X86-SSE-NEXT:    pblendw $243, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc8,0xf3]
+; X86-SSE-NEXT:    ## xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X86-SSE-NEXT:    movdqu %xmm1, (%eax) ## encoding: [0xf3,0x0f,0x7f,0x08]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: insertps_pr20411:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X86-AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X86-AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X86-AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X86-AVX-NEXT:    vmovups %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x11,0x00]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: insertps_pr20411:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pshufd $78, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0x4e]
+; X64-SSE-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X64-SSE-NEXT:    pblendw $243, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc8,0xf3]
+; X64-SSE-NEXT:    ## xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X64-SSE-NEXT:    movdqu %xmm1, (%rdi) ## encoding: [0xf3,0x0f,0x7f,0x0f]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: insertps_pr20411:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X64-AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X64-AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X64-AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X64-AVX-NEXT:    vmovups %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x11,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 0, i32 7, i32 undef, i32 undef>
   %ptrcast = bitcast i32* %RET to <4 x i32>*
   store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
@@ -989,15 +1807,23 @@ define void @insertps_pr20411(<4 x i32>
 }
 
 define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_4:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_4:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_4:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $170, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xaa]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[2],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_4:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $170, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xaa]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[2],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_4:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $170, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xaa]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[2],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1008,15 +1834,23 @@ define <4 x float> @insertps_4(<4 x floa
 }
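
For the immediates now spelled out in insertps_4 above and the insertps_5 through insertps_10 tests that follow, the same field layout applies: source element in bits 7:6, destination slot in bits 5:4, zero mask in bits 3:0, so $170 decodes as 0xAA = 10 10 1010b. A minimal C sketch, illustrative rather than part of the commit:

#include <smmintrin.h>

/* Element 2 of B into slot 2 of A, zeroing lanes 1 and 3 --
   matching the checked shuffle xmm0 = xmm0[0],zero,xmm1[2],zero. */
__m128 insertps_4_demo(__m128 A, __m128 B) {
  return _mm_insert_ps(A, B, 0xAA);
}

The recurring "EVEX TO VEX Compression" remarks make the same point as the shared byte patterns: with only -mattr=+avx512f in play these instructions need no AVX-512 feature, so the assembler re-encodes them in the shorter VEX form and the AVX1 and AVX512 check lines end up byte-identical.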
 
 define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_5:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_5:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_5:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $92, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x5c]
+; SSE-NEXT:    ## xmm0 = xmm0[0],xmm1[1],zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_5:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $92, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x5c]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1],zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_5:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $92, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x5c]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %B, i32 1
@@ -1027,15 +1861,23 @@ define <4 x float> @insertps_5(<4 x floa
 }
 
 define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_6:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_6:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_6:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $169, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xa9]
+; SSE-NEXT:    ## xmm0 = zero,xmm0[1],xmm1[2],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_6:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $169, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xa9]
+; AVX1-NEXT:    ## xmm0 = zero,xmm0[1],xmm1[2],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_6:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $169, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xa9]
+; AVX512-NEXT:    ## xmm0 = zero,xmm0[1],xmm1[2],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
   %vecext1 = extractelement <4 x float> %B, i32 2
@@ -1045,15 +1887,23 @@ define <4 x float> @insertps_6(<4 x floa
 }
 
 define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_7:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_7:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_7:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $106, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x6a]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[1],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_7:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $106, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x6a]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[1],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_7:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $106, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x6a]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm1[1],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1064,15 +1914,23 @@ define <4 x float> @insertps_7(<4 x floa
 }
 
 define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_8:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_8:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_8:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $28, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x1c]
+; SSE-NEXT:    ## xmm0 = xmm0[0],xmm1[0],zero,zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $28, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x1c]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[0],zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $28, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x1c]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[0],zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %B, i32 0
@@ -1083,17 +1941,24 @@ define <4 x float> @insertps_8(<4 x floa
 }
 
 define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
-; X32-LABEL: insertps_9:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
-; X32-NEXT:    movaps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_9:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
-; X64-NEXT:    movaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_9:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $25, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xc8,0x19]
+; SSE-NEXT:    ## xmm1 = zero,xmm0[0],xmm1[2],zero
+; SSE-NEXT:    movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_9:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $25, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x21,0xc0,0x19]
+; AVX1-NEXT:    ## xmm0 = zero,xmm0[0],xmm1[2],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_9:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $25, %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xc0,0x19]
+; AVX512-NEXT:    ## xmm0 = zero,xmm0[0],xmm1[2],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
   %vecext1 = extractelement <4 x float> %B, i32 2
@@ -1103,15 +1968,23 @@ define <4 x float> @insertps_9(<4 x floa
 }
 
 define <4 x float> @insertps_10(<4 x float> %A) {
-; X32-LABEL: insertps_10:
-; X32:       ## %bb.0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: insertps_10:
-; X64:       ## %bb.0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
-; X64-NEXT:    retq
+; SSE-LABEL: insertps_10:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    insertps $42, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x2a]
+; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[0],zero
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: insertps_10:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vinsertps $42, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x2a]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[0],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_10:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vinsertps $42, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x2a]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[0],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
   %vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
@@ -1119,17 +1992,19 @@ define <4 x float> @insertps_10(<4 x flo
 }
 
 define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
-; X32-LABEL: build_vector_to_shuffle_1:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: build_vector_to_shuffle_1:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm1
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; X64-NEXT:    retq
+; SSE-LABEL: build_vector_to_shuffle_1:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
+; SSE-NEXT:    blendps $5, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x05]
+; SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: build_vector_to_shuffle_1:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
+; AVX-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
@@ -1138,17 +2013,19 @@ define <4 x float> @build_vector_to_shuf
 }
 
 define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
-; X32-LABEL: build_vector_to_shuffle_2:
-; X32:       ## %bb.0:
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: build_vector_to_shuffle_2:
-; X64:       ## %bb.0:
-; X64-NEXT:    xorps %xmm1, %xmm1
-; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; X64-NEXT:    retq
+; SSE-LABEL: build_vector_to_shuffle_2:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
+; SSE-NEXT:    blendps $13, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x0d]
+; SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: build_vector_to_shuffle_2:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX-NEXT:    vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
+; AVX-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll Sat Jun  2 10:33:26 2018
@@ -1,25 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c
 
 define i64 @test_mm_crc64_u8(i64 %a0, i8 %a1) nounwind {
-; X64-LABEL: test_mm_crc64_u8:
-; X64:       # %bb.0:
-; X64-NEXT:    crc32b %sil, %edi
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_crc64_u8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    crc32b %sil, %edi
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
   %res = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
   ret i64 %res
 }
 declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind readnone
 
 define i64 @test_mm_crc64_u64(i64 %a0, i64 %a1) nounwind {
-; X64-LABEL: test_mm_crc64_u64:
-; X64:       # %bb.0:
-; X64-NEXT:    crc32q %rsi, %rdi
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_crc64_u64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    crc32q %rsi, %rdi
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    retq
   %res = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
   ret i64 %res
 }
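
A brief aside on the two tests above (a sketch under the standard intrinsic definitions, not taken from the commit): crc32q consumes eight bytes per step, but the CRC state itself stays 32 bits wide, which is why the result comes back zero-extended through RAX. A typical accumulation loop looks like:

#include <nmmintrin.h>
#include <stdint.h>
#include <string.h>

/* Hedged example: CRC32C over a buffer on x86-64, eight bytes per
   crc32q with a byte-wise tail; only the low 32 bits of crc carry state. */
uint64_t crc32c_buf(uint64_t crc, const void *buf, size_t n) {
  const uint8_t *p = (const uint8_t *)buf;
  while (n >= 8) {
    uint64_t chunk;
    memcpy(&chunk, p, sizeof(chunk)); /* unaligned-safe load */
    crc = _mm_crc32_u64(crc, chunk);
    p += 8;
    n -= 8;
  }
  while (n--)
    crc = _mm_crc32_u8((uint32_t)crc, *p++);
  return crc;
}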

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll Sat Jun  2 10:33:26 2018
@@ -1,31 +1,57 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c
 
 define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_cmpestra:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    seta %bl
-; X32-NEXT:    movl %ebx, %eax
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestra:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    xorl %ebx, %ebx
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    seta %bl
+; X86-SSE-NEXT:    movl %ebx, %eax
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestra:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %r8d, %r8d
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    seta %r8b
-; X64-NEXT:    movl %r8d, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestra:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    xorl %ebx, %ebx
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    seta %bl
+; X86-AVX-NEXT:    movl %ebx, %eax
+; X86-AVX-NEXT:    popl %ebx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestra:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    seta %r8b
+; X64-SSE-NEXT:    movl %r8d, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestra:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    seta %r8b
+; X64-AVX-NEXT:    movl %r8d, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestria128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -34,27 +60,49 @@ define i32 @test_mm_cmpestra(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
 
 define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_cmpestrc:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    setb %bl
-; X32-NEXT:    movl %ebx, %eax
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestrc:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    xorl %ebx, %ebx
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    setb %bl
+; X86-SSE-NEXT:    movl %ebx, %eax
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestrc:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %r8d, %r8d
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    setb %r8b
-; X64-NEXT:    movl %r8d, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestrc:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    xorl %ebx, %ebx
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    setb %bl
+; X86-AVX-NEXT:    movl %ebx, %eax
+; X86-AVX-NEXT:    popl %ebx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestrc:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    setb %r8b
+; X64-SSE-NEXT:    movl %r8d, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestrc:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    setb %r8b
+; X64-AVX-NEXT:    movl %r8d, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -63,21 +111,37 @@ define i32 @test_mm_cmpestrc(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
 
 define i32 @test_mm_cmpestri(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
-; X32-LABEL: test_mm_cmpestri:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestri:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    movl %ecx, %eax
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestri:
-; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    movl %ecx, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestri:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    movl %ecx, %eax
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestri:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    movl %ecx, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestri:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    movl %ecx, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -86,19 +150,33 @@ define i32 @test_mm_cmpestri(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
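
The movl %edi, %eax / movl %esi, %edx staging in the checks above is dictated by the ISA: pcmpestri takes its two explicit lengths implicitly in EAX and EDX and writes the index to ECX. For reference (my decoding, not stated anywhere in the commit), the imm8 of 7 used throughout corresponds to _SIDD_SWORD_OPS | _SIDD_CMP_RANGES:

#include <nmmintrin.h>

/* Sketch of the source shape these fast-isel tests lower: la must be
   staged in EAX and lb in EDX before pcmpestri, and the ECX result is
   copied into the return register afterwards. */
int cmpestri_demo(__m128i a, int la, __m128i b, int lb) {
  return _mm_cmpestri(a, la, b, lb,
                      _SIDD_SWORD_OPS | _SIDD_CMP_RANGES); /* imm8 == 7 */
}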
 
 define <2 x i64> @test_mm_cmpestrm(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
-; X32-LABEL: test_mm_cmpestrm:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pcmpestrm $7, %xmm1, %xmm0
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestrm:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pcmpestrm $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestrm:
-; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestrm $7, %xmm1, %xmm0
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestrm:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vpcmpestrm $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestrm:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestrm $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestrm:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestrm $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -108,27 +186,49 @@ define <2 x i64> @test_mm_cmpestrm(<2 x
 declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
 
 define i32 @test_mm_cmpestro(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_cmpestro:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    seto %bl
-; X32-NEXT:    movl %ebx, %eax
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestro:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    xorl %ebx, %ebx
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    seto %bl
+; X86-SSE-NEXT:    movl %ebx, %eax
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestro:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %r8d, %r8d
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    seto %r8b
-; X64-NEXT:    movl %r8d, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestro:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    xorl %ebx, %ebx
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    seto %bl
+; X86-AVX-NEXT:    movl %ebx, %eax
+; X86-AVX-NEXT:    popl %ebx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestro:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    seto %r8b
+; X64-SSE-NEXT:    movl %r8d, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestro:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    seto %r8b
+; X64-AVX-NEXT:    movl %r8d, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -137,27 +237,49 @@ define i32 @test_mm_cmpestro(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
 
 define i32 @test_mm_cmpestrs(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_cmpestrs:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    sets %bl
-; X32-NEXT:    movl %ebx, %eax
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestrs:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    xorl %ebx, %ebx
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    sets %bl
+; X86-SSE-NEXT:    movl %ebx, %eax
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestrs:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %r8d, %r8d
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    sets %r8b
-; X64-NEXT:    movl %r8d, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestrs:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    xorl %ebx, %ebx
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    sets %bl
+; X86-AVX-NEXT:    movl %ebx, %eax
+; X86-AVX-NEXT:    popl %ebx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestrs:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    sets %r8b
+; X64-SSE-NEXT:    movl %r8d, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestrs:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    sets %r8b
+; X64-AVX-NEXT:    movl %r8d, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -166,27 +288,49 @@ define i32 @test_mm_cmpestrs(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
 
 define i32 @test_mm_cmpestrz(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
-; X32-LABEL: test_mm_cmpestrz:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT:    sete %bl
-; X32-NEXT:    movl %ebx, %eax
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    retl
+; X86-SSE-LABEL: test_mm_cmpestrz:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    xorl %ebx, %ebx
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X86-SSE-NEXT:    sete %bl
+; X86-SSE-NEXT:    movl %ebx, %eax
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: test_mm_cmpestrz:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %r8d, %r8d
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movl %esi, %edx
-; X64-NEXT:    pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT:    sete %r8b
-; X64-NEXT:    movl %r8d, %eax
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_mm_cmpestrz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    xorl %ebx, %ebx
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X86-AVX-NEXT:    sete %bl
+; X86-AVX-NEXT:    movl %ebx, %eax
+; X86-AVX-NEXT:    popl %ebx
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_mm_cmpestrz:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    movl %edi, %eax
+; X64-SSE-NEXT:    movl %esi, %edx
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0
+; X64-SSE-NEXT:    sete %r8b
+; X64-SSE-NEXT:    movl %r8d, %eax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_mm_cmpestrz:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    movl %edi, %eax
+; X64-AVX-NEXT:    movl %esi, %edx
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0
+; X64-AVX-NEXT:    sete %r8b
+; X64-AVX-NEXT:    movl %r8d, %eax
+; X64-AVX-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)
@@ -195,22 +339,34 @@ define i32 @test_mm_cmpestrz(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
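
Mapping the setcc choices above onto the EFLAGS that pcmpestri defines (my summary of the ISA reference, offered as a reading aid): seta tests CF=0 and ZF=0 (_mm_cmpestra), setb tests CF (_mm_cmpestrc), seto tests OF (_mm_cmpestro), sets tests SF (_mm_cmpestrs), and sete tests ZF (_mm_cmpestrz). Each flag-reading intrinsic therefore lowers to the same pcmpestri plus a single setcc, for example:

#include <nmmintrin.h>

/* Illustrative only: CF is set when the comparison produces a nonzero
   result mask, so this compiles to pcmpestri + setb as in
   test_mm_cmpestrc above. */
int any_match(__m128i a, int la, __m128i b, int lb) {
  return _mm_cmpestrc(a, la, b, lb,
                      _SIDD_SWORD_OPS | _SIDD_CMP_RANGES);
}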
 
 define <2 x i64> @test_mm_cmpgt_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpgt_epi64:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pcmpgtq %xmm1, %xmm0
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpgt_epi64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpgtq %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpgt_epi64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %cmp = icmp sgt <2 x i64> %a0, %a1
   %res = sext <2 x i1> %cmp to <2 x i64>
   ret <2 x i64> %res
 }
 
 define i32 @test_mm_cmpistra(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistra:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorl %eax, %eax
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    seta %al
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistra:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistra:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -219,12 +375,19 @@ define i32 @test_mm_cmpistra(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind readnone
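
Unlike the explicit-length tests above, the pcmpistr* checks need no EAX/EDX staging: the implicit-length form scans each operand only up to a NUL element. A hedged source-level counterpart:

#include <nmmintrin.h>

/* Sketch: implicit-length compare. Only the xorl/seta pair brackets
   pcmpistri because no length registers are consumed. */
int cmpistra_demo(__m128i a, __m128i b) {
  return _mm_cmpistra(a, b, _SIDD_SWORD_OPS | _SIDD_CMP_RANGES);
}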
 
 define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistrc:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorl %eax, %eax
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    setb %al
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistrc:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    setb %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistrc:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -233,11 +396,17 @@ define i32 @test_mm_cmpistrc(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define i32 @test_mm_cmpistri(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistri:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    movl %ecx, %eax
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistri:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistri:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    movl %ecx, %eax
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -246,10 +415,15 @@ define i32 @test_mm_cmpistri(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_cmpistrm(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistrm:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pcmpistrm $7, %xmm1, %xmm0
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistrm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpistrm $7, %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistrm:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpistrm $7, %xmm1, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -259,12 +433,19 @@ define <2 x i64> @test_mm_cmpistrm(<2 x
 declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define i32 @test_mm_cmpistro(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistro:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorl %eax, %eax
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    seto %al
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistro:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    seto %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistro:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    seto %al
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -273,12 +454,19 @@ define i32 @test_mm_cmpistro(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define i32 @test_mm_cmpistrs(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistrs:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorl %eax, %eax
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    sets %al
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistrs:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    sets %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistrs:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    sets %al
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistris128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -287,12 +475,19 @@ define i32 @test_mm_cmpistrs(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define i32 @test_mm_cmpistrz(<2 x i64> %a0, <2 x i64> %a1) {
-; ALL-LABEL: test_mm_cmpistrz:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorl %eax, %eax
-; ALL-NEXT:    pcmpistri $7, %xmm1, %xmm0
-; ALL-NEXT:    sete %al
-; ALL-NEXT:    ret{{[l|q]}}
+; SSE-LABEL: test_mm_cmpistrz:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0
+; SSE-NEXT:    sete %al
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: test_mm_cmpistrz:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@@ -301,11 +496,11 @@ define i32 @test_mm_cmpistrz(<2 x i64> %
 declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define i32 @test_mm_crc32_u8(i32 %a0, i8 %a1) {
-; X32-LABEL: test_mm_crc32_u8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    crc32b {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_crc32_u8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    crc32b {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_crc32_u8:
 ; X64:       # %bb.0:
@@ -318,11 +513,11 @@ define i32 @test_mm_crc32_u8(i32 %a0, i8
 declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind readnone
 
 define i32 @test_mm_crc32_u16(i32 %a0, i16 %a1) {
-; X32-LABEL: test_mm_crc32_u16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    crc32w {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_crc32_u16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    crc32w {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_crc32_u16:
 ; X64:       # %bb.0:
@@ -335,11 +530,11 @@ define i32 @test_mm_crc32_u16(i32 %a0, i
 declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind readnone
 
 define i32 @test_mm_crc32_u32(i32 %a0, i32 %a1) {
-; X32-LABEL: test_mm_crc32_u32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    crc32l {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_crc32_u32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    crc32l {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_crc32_u32:
 ; X64:       # %bb.0:

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll Sat Jun  2 10:33:26 2018
@@ -1,24 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
-; SSE42-LABEL: test_x86_sse42_pcmpestri128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestri128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpestri128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpestri128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -26,38 +29,45 @@ declare i32 @llvm.x86.sse42.pcmpestri128
 
 
 define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
-; SSE42-LABEL: test_x86_sse42_pcmpestri128_load:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE42-NEXT:    movdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x00]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    pcmpestri $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0x01,0x07]
-; SSE42-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse42_pcmpestri128_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT:    vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
-; AVX2-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; AVX2-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; AVX2-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
-; AVX2-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse42_pcmpestri128_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT:    vmovdqa (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x00]
-; SKX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SKX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SKX-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
-; SKX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestri128_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x00]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    pcmpestri $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0x01,0x07]
+; X86-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
+; X86-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestri128_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x07]
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    pcmpestri $7, (%rsi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0x06,0x07]
+; X64-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    vpcmpestri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x06,0x07]
+; X64-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a0
   %2 = load <16 x i8>, <16 x i8>* %a2
   %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
@@ -66,29 +76,49 @@ define i32 @test_x86_sse42_pcmpestri128_
 
 
 define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; SSE42-LABEL: test_x86_sse42_pcmpestria128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pushl %ebx ## encoding: [0x53]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    seta %bl ## encoding: [0x0f,0x97,0xc3]
-; SSE42-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; SSE42-NEXT:    popl %ebx ## encoding: [0x5b]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestria128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    pushl %ebx ## encoding: [0x53]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    seta %bl ## encoding: [0x0f,0x97,0xc3]
-; VCHECK-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; VCHECK-NEXT:    popl %ebx ## encoding: [0x5b]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestria128:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X86-SSE-NEXT:    seta %bl ## encoding: [0x0f,0x97,0xc3]
+; X86-SSE-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-SSE-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestria128:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X86-AVX-NEXT:    seta %bl ## encoding: [0x0f,0x97,0xc3]
+; X86-AVX-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-AVX-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestria128:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X64-SSE-NEXT:    seta %sil ## encoding: [0x40,0x0f,0x97,0xc6]
+; X64-SSE-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestria128:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X64-AVX-NEXT:    seta %sil ## encoding: [0x40,0x0f,0x97,0xc6]
+; X64-AVX-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestria128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -96,29 +126,49 @@ declare i32 @llvm.x86.sse42.pcmpestria12
 
 
 define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; SSE42-LABEL: test_x86_sse42_pcmpestric128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pushl %ebx ## encoding: [0x53]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    setb %bl ## encoding: [0x0f,0x92,0xc3]
-; SSE42-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; SSE42-NEXT:    popl %ebx ## encoding: [0x5b]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestric128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    pushl %ebx ## encoding: [0x53]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    setb %bl ## encoding: [0x0f,0x92,0xc3]
-; VCHECK-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; VCHECK-NEXT:    popl %ebx ## encoding: [0x5b]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestric128:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X86-SSE-NEXT:    setb %bl ## encoding: [0x0f,0x92,0xc3]
+; X86-SSE-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-SSE-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestric128:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X86-AVX-NEXT:    setb %bl ## encoding: [0x0f,0x92,0xc3]
+; X86-AVX-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-AVX-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestric128:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X64-SSE-NEXT:    setb %sil ## encoding: [0x40,0x0f,0x92,0xc6]
+; X64-SSE-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestric128:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X64-AVX-NEXT:    setb %sil ## encoding: [0x40,0x0f,0x92,0xc6]
+; X64-AVX-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -126,29 +176,49 @@ declare i32 @llvm.x86.sse42.pcmpestric12
 
 
 define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; SSE42-LABEL: test_x86_sse42_pcmpestrio128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pushl %ebx ## encoding: [0x53]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    seto %bl ## encoding: [0x0f,0x90,0xc3]
-; SSE42-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; SSE42-NEXT:    popl %ebx ## encoding: [0x5b]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestrio128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    pushl %ebx ## encoding: [0x53]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    seto %bl ## encoding: [0x0f,0x90,0xc3]
-; VCHECK-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; VCHECK-NEXT:    popl %ebx ## encoding: [0x5b]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestrio128:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X86-SSE-NEXT:    seto %bl ## encoding: [0x0f,0x90,0xc3]
+; X86-SSE-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-SSE-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestrio128:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X86-AVX-NEXT:    seto %bl ## encoding: [0x0f,0x90,0xc3]
+; X86-AVX-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-AVX-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestrio128:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X64-SSE-NEXT:    seto %sil ## encoding: [0x40,0x0f,0x90,0xc6]
+; X64-SSE-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestrio128:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X64-AVX-NEXT:    seto %sil ## encoding: [0x40,0x0f,0x90,0xc6]
+; X64-AVX-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -156,29 +226,49 @@ declare i32 @llvm.x86.sse42.pcmpestrio12
 
 
 define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; SSE42-LABEL: test_x86_sse42_pcmpestris128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pushl %ebx ## encoding: [0x53]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    sets %bl ## encoding: [0x0f,0x98,0xc3]
-; SSE42-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; SSE42-NEXT:    popl %ebx ## encoding: [0x5b]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestris128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    pushl %ebx ## encoding: [0x53]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    sets %bl ## encoding: [0x0f,0x98,0xc3]
-; VCHECK-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; VCHECK-NEXT:    popl %ebx ## encoding: [0x5b]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestris128:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X86-SSE-NEXT:    sets %bl ## encoding: [0x0f,0x98,0xc3]
+; X86-SSE-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-SSE-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestris128:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X86-AVX-NEXT:    sets %bl ## encoding: [0x0f,0x98,0xc3]
+; X86-AVX-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-AVX-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestris128:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X64-SSE-NEXT:    sets %sil ## encoding: [0x40,0x0f,0x98,0xc6]
+; X64-SSE-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestris128:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X64-AVX-NEXT:    sets %sil ## encoding: [0x40,0x0f,0x98,0xc6]
+; X64-AVX-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -186,29 +276,49 @@ declare i32 @llvm.x86.sse42.pcmpestris12
 
 
 define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; SSE42-LABEL: test_x86_sse42_pcmpestriz128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pushl %ebx ## encoding: [0x53]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; SSE42-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT:    sete %bl ## encoding: [0x0f,0x94,0xc3]
-; SSE42-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; SSE42-NEXT:    popl %ebx ## encoding: [0x5b]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestriz128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    pushl %ebx ## encoding: [0x53]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; VCHECK-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT:    sete %bl ## encoding: [0x0f,0x94,0xc3]
-; VCHECK-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
-; VCHECK-NEXT:    popl %ebx ## encoding: [0x5b]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestriz128:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X86-SSE-NEXT:    sete %bl ## encoding: [0x0f,0x94,0xc3]
+; X86-SSE-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-SSE-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestriz128:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    pushl %ebx ## encoding: [0x53]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    xorl %ebx, %ebx ## encoding: [0x31,0xdb]
+; X86-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X86-AVX-NEXT:    sete %bl ## encoding: [0x0f,0x94,0xc3]
+; X86-AVX-NEXT:    movl %ebx, %eax ## encoding: [0x89,0xd8]
+; X86-AVX-NEXT:    popl %ebx ## encoding: [0x5b]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestriz128:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-SSE-NEXT:    pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
+; X64-SSE-NEXT:    sete %sil ## encoding: [0x40,0x0f,0x94,0xc6]
+; X64-SSE-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestriz128:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    xorl %esi, %esi ## encoding: [0x31,0xf6]
+; X64-AVX-NEXT:    vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
+; X64-AVX-NEXT:    sete %sil ## encoding: [0x40,0x0f,0x94,0xc6]
+; X64-AVX-NEXT:    movl %esi, %eax ## encoding: [0x89,0xf0]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -216,19 +326,19 @@ declare i32 @llvm.x86.sse42.pcmpestriz12
 
 
 define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) {
-; SSE42-LABEL: test_x86_sse42_pcmpestrm128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    pcmpestrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0xc1,0x07]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestrm128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpestrm128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; SSE-NEXT:    pcmpestrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpestrm128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; AVX-NEXT:    vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -236,21 +346,35 @@ declare <16 x i8> @llvm.x86.sse42.pcmpes
 
 
 define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) {
-; SSE42-LABEL: test_x86_sse42_pcmpestrm128_load:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; SSE42-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; SSE42-NEXT:    pcmpestrm $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x01,0x07]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpestrm128_load:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; VCHECK-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; VCHECK-NEXT:    vpcmpestrm $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x01,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpestrm128_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-SSE-NEXT:    pcmpestrm $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x01,0x07]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpestrm128_load:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX-NEXT:    vpcmpestrm $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x01,0x07]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpestrm128_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-SSE-NEXT:    pcmpestrm $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x07,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpestrm128_load:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX-NEXT:    vpcmpestrm $7, (%rdi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x07,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a2
   %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
@@ -258,17 +382,17 @@ define <16 x i8> @test_x86_sse42_pcmpest
 
 
 define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistri128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistri128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistri128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistri128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -276,32 +400,37 @@ declare i32 @llvm.x86.sse42.pcmpistri128
 
 
 define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistri128_load:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; SSE42-NEXT:    movdqa (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x01]
-; SSE42-NEXT:    pcmpistri $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x00,0x07]
-; SSE42-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; AVX2-LABEL: test_x86_sse42_pcmpistri128_load:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; AVX2-NEXT:    vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
-; AVX2-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
-; AVX2-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX2-NEXT:    retl ## encoding: [0xc3]
-;
-; SKX-LABEL: test_x86_sse42_pcmpistri128_load:
-; SKX:       ## %bb.0:
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; SKX-NEXT:    vmovdqa (%ecx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x01]
-; SKX-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
-; SKX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; SKX-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpistri128_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x01]
+; X86-SSE-NEXT:    pcmpistri $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x00,0x07]
+; X86-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
+; X86-AVX-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
+; X86-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpistri128_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x07]
+; X64-SSE-NEXT:    pcmpistri $7, (%rsi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x06,0x07]
+; X64-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX-NEXT:    vpcmpistri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x06,0x07]
+; X64-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a0
   %2 = load <16 x i8>, <16 x i8>* %a1
   %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
@@ -310,19 +439,19 @@ define i32 @test_x86_sse42_pcmpistri128_
 
 
 define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistria128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistria128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistria128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistria128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -330,19 +459,19 @@ declare i32 @llvm.x86.sse42.pcmpistria12
 
 
 define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistric128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistric128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistric128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistric128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    setb %al ## encoding: [0x0f,0x92,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -350,19 +479,19 @@ declare i32 @llvm.x86.sse42.pcmpistric12
 
 
 define i32 @test_x86_sse42_pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrio128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    seto %al ## encoding: [0x0f,0x90,0xc0]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrio128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    seto %al ## encoding: [0x0f,0x90,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistrio128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    seto %al ## encoding: [0x0f,0x90,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistrio128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    seto %al ## encoding: [0x0f,0x90,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -370,19 +499,19 @@ declare i32 @llvm.x86.sse42.pcmpistrio12
 
 
 define i32 @test_x86_sse42_pcmpistris128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistris128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    sets %al ## encoding: [0x0f,0x98,0xc0]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistris128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    sets %al ## encoding: [0x0f,0x98,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistris128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    sets %al ## encoding: [0x0f,0x98,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistris128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    sets %al ## encoding: [0x0f,0x98,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistris128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -390,19 +519,19 @@ declare i32 @llvm.x86.sse42.pcmpistris12
 
 
 define i32 @test_x86_sse42_pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistriz128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistriz128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistriz128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT:    pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistriz128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT:    vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT:    sete %al ## encoding: [0x0f,0x94,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -410,15 +539,15 @@ declare i32 @llvm.x86.sse42.pcmpistriz12
 
 
 define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrm128:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    pcmpistrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0xc1,0x07]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrm128:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistrm128:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    pcmpistrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0xc1,0x07]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse42_pcmpistrm128:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
+; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -426,50 +555,78 @@ declare <16 x i8> @llvm.x86.sse42.pcmpis
 
 
 define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrm128_load:
-; SSE42:       ## %bb.0:
-; SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE42-NEXT:    pcmpistrm $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x00,0x07]
-; SSE42-NEXT:    retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrm128_load:
-; VCHECK:       ## %bb.0:
-; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; VCHECK-NEXT:    vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
-; VCHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X86-SSE:       ## %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    pcmpistrm $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x00,0x07]
+; X86-SSE-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X86-AVX:       ## %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
+; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X64-SSE:       ## %bb.0:
+; X64-SSE-NEXT:    pcmpistrm $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x07,0x07]
+; X64-SSE-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X64-AVX:       ## %bb.0:
+; X64-AVX-NEXT:    vpcmpistrm $7, (%rdi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x07,0x07]
+; X64-AVX-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a1, align 1
   %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
 
 define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
-; CHECK-LABEL: crc32_32_8:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT:    crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_8:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_8:
+; X64:       ## %bb.0:
+; X64-NEXT:    crc32b %sil, %edi ## encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xfe]
+; X64-NEXT:    movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT:    retq ## encoding: [0xc3]
   %tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
   ret i32 %tmp
 }
 declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
 
 define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
-; CHECK-LABEL: crc32_32_16:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT:    crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_16:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_16:
+; X64:       ## %bb.0:
+; X64-NEXT:    crc32w %si, %edi ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xfe]
+; X64-NEXT:    movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT:    retq ## encoding: [0xc3]
   %tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
   ret i32 %tmp
 }
 declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
 
 define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: crc32_32_32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT:    crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT:    retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_32:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_32:
+; X64:       ## %bb.0:
+; X64-NEXT:    crc32l %esi, %edi ## encoding: [0xf2,0x0f,0x38,0xf1,0xfe]
+; X64-NEXT:    movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT:    retq ## encoding: [0xc3]
   %tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
   ret i32 %tmp
 }
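
For reference, the pcmpestri/pcmpistri patterns above are what the SSE4.2
string-compare intrinsics lower to. The explicit-length ("est") forms take the
two string lengths in EAX/EDX -- hence the repeated `movl $7, %eax` /
`movl $7, %edx` setup -- while the implicit-length ("ist") forms stop at a NUL
byte and need no length setup. The index result comes back in ECX, and the
seta/setb/seto/sets/sete variants read one EFLAGS bit each, matching the
_mm_cmpestra/c/o/s/z family. A minimal C sketch, assuming <nmmintrin.h> and
-msse4.2; the function names and the mode constant are illustrative, not taken
from the tests:

  #include <nmmintrin.h>  /* SSE4.2 string-compare intrinsics */

  /* The match mode must be a compile-time constant for these intrinsics. */
  #define MODE (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)

  /* Index of the first byte of 'text' (length lt) that appears in the byte
     set 'set' (length ls), or -1. Lowers to pcmpestri plus a flag read. */
  int find_any(__m128i set, int ls, __m128i text, int lt) {
    if (_mm_cmpestrc(set, ls, text, lt, MODE))       /* CF: any match (setb) */
      return _mm_cmpestri(set, ls, text, lt, MODE);  /* match index from ECX */
    return -1;
  }

  /* Implicit-length variant: pcmpistri, no EAX/EDX setup needed. */
  int find_any_z(__m128i set, __m128i text) {
    return _mm_cmpistri(set, text, MODE);
  }

The pcmpestrm/pcmpistrm forms tested above perform the same comparison but
return the match mask in XMM0 (_mm_cmpestrm/_mm_cmpistrm) instead of an index
in ECX.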

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll Sat Jun  2 10:33:26 2018
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
 declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
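
The crc32_32_* tests above, together with the 64-bit declarations kept in this
file, cover the SSE4.2 CRC32 instruction family, which accumulates a CRC-32C
(Castagnoli polynomial) checksum one operand at a time: crc32b/crc32w/crc32l
fold 8/16/32 bits into a 32-bit running value, and crc32q (x86-64 only) folds
64 bits. A minimal C sketch, assuming <nmmintrin.h> and -msse4.2; the helper
name and loop are illustrative:

  #include <nmmintrin.h>
  #include <stddef.h>
  #include <stdint.h>

  /* CRC-32C over a byte buffer; each _mm_crc32_u8 call is one crc32b.
     _mm_crc32_u16/_mm_crc32_u32 (and _mm_crc32_u64 on x86-64) fold wider
     chunks and map to crc32w/crc32l/crc32q respectively. */
  uint32_t crc32c_bytes(uint32_t crc, const uint8_t *p, size_t n) {
    for (size_t i = 0; i < n; ++i)
      crc = _mm_crc32_u8(crc, p[i]);
    return crc;
  }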

Modified: llvm/trunk/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll Sat Jun  2 10:33:26 2018
@@ -1,36 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefixes=CHECK,X64
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse4a-builtins.c
 
 define <2 x i64> @test_mm_extracti_si64(<2 x i64> %x) {
-; X32-LABEL: test_mm_extracti_si64:
-; X32:       # %bb.0:
-; X32-NEXT:    extrq $2, $3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extracti_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    extrq $2, $3, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_extracti_si64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq $2, $3, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind readnone
 
 define <2 x i64> @test_mm_extract_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_extract_si64:
-; X32:       # %bb.0:
-; X32-NEXT:    extrq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_extract_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    extrq %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_extract_si64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bc = bitcast <2 x i64> %y to <16 x i8>
   %res = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %bc)
   ret <2 x i64> %res
@@ -38,41 +28,31 @@ define <2 x i64> @test_mm_extract_si64(<
 declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_inserti_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_inserti_si64:
-; X32:       # %bb.0:
-; X32-NEXT:    insertq $6, $5, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_inserti_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    insertq $6, $5, %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_inserti_si64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    insertq $6, $5, %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind readnone
 
 define <2 x i64> @test_mm_insert_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_insert_si64:
-; X32:       # %bb.0:
-; X32-NEXT:    insertq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_insert_si64:
-; X64:       # %bb.0:
-; X64-NEXT:    insertq %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_insert_si64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    insertq %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind readnone
 
 define void @test_stream_sd(double* %p, <2 x double> %a) {
-; X32-LABEL: test_stream_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntsd %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_stream_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movntsd %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_stream_sd:
 ; X64:       # %bb.0:
@@ -84,11 +64,11 @@ define void @test_stream_sd(double* %p,
 }
 
 define void @test_mm_stream_ss(float* %p, <4 x float> %a) {
-; X32-LABEL: test_mm_stream_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntss %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_stream_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movntss %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_ss:
 ; X64:       # %bb.0:
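
extrq and insertq, exercised throughout this file, are AMD SSE4A bitfield
operations on the low 64 bits of an XMM register: extrq pulls out a len-bit
field starting at bit idx (zero-extended), and insertq writes the low len bits
of the source into the destination at bit idx. Note that the AT&T immediate
order is reversed, so `extrq $2, $3, %xmm0` is len=3 at idx=2, matching the
extrqi(%x, i8 3, i8 2) calls. A sketch with the corresponding C intrinsics,
assuming <ammintrin.h> and -msse4a; the immediates mirror the ones the tests
use and the function name is illustrative:

  #include <ammintrin.h>  /* AMD SSE4A intrinsics */

  __m128i demo(__m128i x, __m128i y) {
    __m128i e = _mm_extracti_si64(x, 3, 2);    /* extrq $2, $3: 3 bits at 2 */
    __m128i i = _mm_inserti_si64(x, y, 5, 6);  /* insertq $6, $5: 5 bits at 6 */
    e = _mm_extract_si64(e, y);    /* register form: length/index from y */
    return _mm_insert_si64(i, e);  /* register-form insert */
  }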

Modified: llvm/trunk/test/CodeGen/X86/sse4a-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a-upgrade.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a-upgrade.ll Sat Jun  2 10:33:26 2018
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=X64
 
 define void @test_movntss(i8* %p, <4 x float> %a) nounwind optsize ssp {
-; X32-LABEL: test_movntss:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntss %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_movntss:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movntss %xmm0, (%eax) # encoding: [0xf3,0x0f,0x2b,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_movntss:
 ; X64:       # %bb.0:
-; X64-NEXT:    movntss %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X64-NEXT:    movntss %xmm0, (%rdi) # encoding: [0xf3,0x0f,0x2b,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
   ret void
 }
@@ -22,16 +22,16 @@ define void @test_movntss(i8* %p, <4 x f
 declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
 
 define void @test_movntsd(i8* %p, <2 x double> %a) nounwind optsize ssp {
-; X32-LABEL: test_movntsd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movntsd %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_movntsd:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movntsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x2b,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_movntsd:
 ; X64:       # %bb.0:
-; X64-NEXT:    movntsd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X64-NEXT:    movntsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x2b,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
   ret void
 }
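
movntss and movntsd are the SSE4A non-temporal scalar stores: they write the
low float/double of an XMM register straight to memory with a streaming hint,
bypassing the cache, which is why each test reduces to a single store plus the
return. A sketch with the matching intrinsics, assuming <ammintrin.h> and
-msse4a; the function and pointer names are illustrative:

  #include <ammintrin.h>

  void stream_scalars(float *fp, double *dp, __m128 a, __m128d b) {
    _mm_stream_ss(fp, a);  /* movntss: non-temporal store of a's low float  */
    _mm_stream_sd(dp, b);  /* movntsd: non-temporal store of b's low double */
  }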

Modified: llvm/trunk/test/CodeGen/X86/sse4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a.ll?rev=333828&r1=333827&r2=333828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a.ll Sat Jun  2 10:33:26 2018
@@ -1,49 +1,44 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
 
 define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
-; X32-LABEL: test_extrqi:
-; X32:       # %bb.0:
-; X32-NEXT:    extrq $2, $3, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_extrqi:
-; X64:       # %bb.0:
-; X64-NEXT:    extrq $2, $3, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_extrqi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
   ret <2 x i64> %1
 }
 
 define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrqi_domain:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movdqa (%eax), %xmm0
-; X32-SSE-NEXT:    extrq $2, $3, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X32-AVX-LABEL: test_extrqi_domain:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovdqa (%eax), %xmm0
-; X32-AVX-NEXT:    extrq $2, $3, %xmm0
-; X32-AVX-NEXT:    retl
+; X86-SSE-LABEL: test_extrqi_domain:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%eax), %xmm0 # encoding: [0x66,0x0f,0x6f,0x00]
+; X86-SSE-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-SSE-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_extrqi_domain:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_extrqi_domain:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa (%rdi), %xmm0
-; X64-SSE-NEXT:    extrq $2, $3, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm0 # encoding: [0x66,0x0f,0x6f,0x07]
+; X64-SSE-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_extrqi_domain:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0
-; X64-AVX-NEXT:    extrq $2, $3, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = load <2 x i64>, <2 x i64> *%p
   %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
   ret <2 x i64> %2
@@ -52,50 +47,45 @@ define <2 x i64> @test_extrqi_domain(<2
 declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_extrq:
-; X32:       # %bb.0:
-; X32-NEXT:    extrq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_extrq:
-; X64:       # %bb.0:
-; X64-NEXT:    extrq %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_extrq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x79,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %1 = bitcast <2 x i64> %y to <16 x i8>
   %2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
   ret <2 x i64> %2
 }
 
 define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrq_domain:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movdqa (%eax), %xmm1
-; X32-SSE-NEXT:    extrq %xmm0, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X32-AVX-LABEL: test_extrq_domain:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT:    extrq %xmm0, %xmm1
-; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT:    retl
+; X86-SSE-LABEL: test_extrq_domain:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_extrq_domain:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_extrq_domain:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
-; X64-SSE-NEXT:    extrq %xmm0, %xmm1
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_extrq_domain:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT:    extrq %xmm0, %xmm1
-; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = load <2 x i64>, <2 x i64> *%p
   %2 = bitcast <2 x i64> %y to <16 x i8>
   %3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
@@ -105,49 +95,44 @@ define <2 x i64> @test_extrq_domain(<2 x
 declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
 
 define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertqi:
-; X32:       # %bb.0:
-; X32-NEXT:    insertq $6, $5, %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_insertqi:
-; X64:       # %bb.0:
-; X64-NEXT:    insertq $6, $5, %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_insertqi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    insertq $6, $5, %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x78,0xc1,0x05,0x06]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
   ret <2 x i64> %1
 }
 
 define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertqi_domain:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movdqa (%eax), %xmm1
-; X32-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X32-AVX-LABEL: test_insertqi_domain:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1
-; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT:    retl
+; X86-SSE-LABEL: test_insertqi_domain:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_insertqi_domain:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_insertqi_domain:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
-; X64-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_insertqi_domain:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1
-; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = load <2 x i64>, <2 x i64> *%p
   %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
   ret <2 x i64> %2
@@ -156,49 +141,44 @@ define <2 x i64> @test_insertqi_domain(<
 declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertq:
-; X32:       # %bb.0:
-; X32-NEXT:    insertq %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_insertq:
-; X64:       # %bb.0:
-; X64-NEXT:    insertq %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_insertq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    insertq %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x79,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
   ret <2 x i64> %1
 }
 
 define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertq_domain:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movdqa (%eax), %xmm1
-; X32-SSE-NEXT:    insertq %xmm0, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X32-AVX-LABEL: test_insertq_domain:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT:    insertq %xmm0, %xmm1
-; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT:    retl
+; X86-SSE-LABEL: test_insertq_domain:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_insertq_domain:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_insertq_domain:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
-; X64-SSE-NEXT:    insertq %xmm0, %xmm1
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_insertq_domain:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT:    insertq %xmm0, %xmm1
-; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = load <2 x i64>, <2 x i64> *%p
   %2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
   ret <2 x i64> %2
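
The added "# encoding:" comments pin the tests to the exact instruction bytes produced by the MC layer. Taking the extrq line from test_extrqi as an example, the bytes in [0x66,0x0f,0x78,0xc0,0x03,0x02] break down as follows (per the AMD encoding of EXTRQ; note the two immediates are emitted in Intel operand order, i.e. reversed relative to the AT&T operands printed on the left):

  0x66       mandatory prefix for EXTRQ
  0x0f 0x78  opcode
  0xc0       ModRM byte: register-direct form, %xmm0
  0x03       imm8 length field (the "i8 3" operand of llvm.x86.sse4a.extrqi)
  0x02       imm8 index field (the "i8 2" operand)

Encoding checks like these only appear when llc is run with -show-mc-encoding. As a minimal sketch (not part of this patch; the RUN lines are illustrative and may differ from the exact ones in the modified files), a standalone SSE4A test in the same style would look like:

  ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s

  define <2 x i64> @sample_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind {
  ; CHECK-LABEL: sample_insertqi:
  ; CHECK:       # %bb.0:
  ; CHECK-NEXT:    insertq $6, $5, %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x78,0xc1,0x05,0x06]
  ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
    %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
    ret <2 x i64> %1
  }

  declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind

The expected asm and encoding bytes in the sketch are copied from the test_insertqi checks above; after editing a test like this, the CHECK lines can be regenerated with utils/update_llc_test_checks.py, as the autogeneration note at the top of these files indicates.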