[llvm] r333830 - [X86][SSE4] Tweak rL333828 sse41/sse42 cleanup to recover SKX/EVEX2VEX testing

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Sat Jun 2 11:01:09 PDT 2018


Author: rksimon
Date: Sat Jun  2 11:01:09 2018
New Revision: 333830

URL: http://llvm.org/viewvc/llvm-project?rev=333830&view=rev
Log:
[X86][SSE4] Tweak rL333828 sse41/sse42 cleanup to recover SKX/EVEX2VEX testing

Testing for just avx512f meant we were missing checks for the EVEX TO VEX Compression encodings etc.
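
For illustration (a sketch, not part of the commit): with only +avx512f the 128-bit
EVEX forms of instructions such as vpminsb are not available, so llc emits the VEX
encoding directly and the AVX512 run lines matched the common AVX checks. With
+avx512bw, +avx512dq and +avx512vl also enabled (approximating SKX), the EVEX forms
are selected and then compressed back to VEX, which -show-mc-encoding reports with
the "EVEX TO VEX Compression" comment that these tests can now verify:

    ;; -mattr=+avx512f
    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]

    ;; -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl
    vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]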

Modified:
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/sse41.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll

Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
 ; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse41-builtins.c
 
@@ -182,10 +182,16 @@ define <2 x i64> @test_mm_cmpeq_epi64(<2
 ; SSE-NEXT:    pcmpeqq %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX-LABEL: test_mm_cmpeq_epi64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX1-LABEL: test_mm_cmpeq_epi64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpeq_epi64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqq %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = icmp eq <2 x i64> %a0, %a1
   %res = sext <2 x i1> %cmp to <2 x i64>
   ret <2 x i64> %res
@@ -858,11 +864,10 @@ define <2 x i64> @test_mm_mul_epi32(<2 x
 ; AVX512-LABEL: test_mm_mul_epi32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX512-NEXT:    vpsraq $32, %zmm0, %zmm0
+; AVX512-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX512-NEXT:    vpsraq $32, %zmm1, %zmm1
-; AVX512-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    vpsraq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    ret{{[l|q]}}
   %A = shl <2 x i64> %a0, <i64 32, i64 32>
   %A1 = ashr exact <2 x i64> %A, <i64 32, i64 32>

Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; This test works just like the non-upgrade one except that it only checks
 ; forms which require auto-upgrading.
@@ -114,21 +114,32 @@ define <2 x i64> @test_x86_sse41_movntdq
 ; X86-SSE-NEXT:    movntdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x00]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: test_x86_sse41_movntdqa:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vmovntdqa (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x00]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: test_x86_sse41_movntdqa:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovntdqa (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse41_movntdqa:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovntdqa (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2a,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_x86_sse41_movntdqa:
 ; X64-SSE:       ## %bb.0:
 ; X64-SSE-NEXT:    movntdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x07]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: test_x86_sse41_movntdqa:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovntdqa (%rdi), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x07]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: test_x86_sse41_movntdqa:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovntdqa (%rdi), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse41_movntdqa:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovntdqa (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2a,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %arg0 = bitcast <2 x i64>* %a0 to i8*
   %res = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %arg0)
   ret <2 x i64> %res
@@ -176,10 +187,15 @@ define <4 x i32> @test_x86_sse41_pmovsxb
 ; SSE-NEXT:    pmovsxbd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x21,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxbd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x21,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxbd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxbd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -192,10 +208,15 @@ define <2 x i64> @test_x86_sse41_pmovsxb
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x22,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxbq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x22,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxbq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxbq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxbq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -208,10 +229,15 @@ define <8 x i16> @test_x86_sse41_pmovsxb
 ; SSE-NEXT:    pmovsxbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x20,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxbw:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x20,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxbw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxbw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -224,10 +250,15 @@ define <2 x i64> @test_x86_sse41_pmovsxd
 ; SSE-NEXT:    pmovsxdq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x25,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxdq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x25,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxdq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxdq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxdq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -240,10 +271,15 @@ define <4 x i32> @test_x86_sse41_pmovsxw
 ; SSE-NEXT:    pmovsxwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x23,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxwd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x23,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxwd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxwd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxwd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -256,10 +292,15 @@ define <2 x i64> @test_x86_sse41_pmovsxw
 ; SSE-NEXT:    pmovsxwq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x24,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovsxwq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovsxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x24,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovsxwq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovsxwq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovsxwq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -273,11 +314,17 @@ define <4 x i32> @test_x86_sse41_pmovzxb
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxbd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x31,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxbd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x31,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxbd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxbd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x31,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -291,11 +338,17 @@ define <2 x i64> @test_x86_sse41_pmovzxb
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxbq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxbq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxbq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxbq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -309,11 +362,17 @@ define <8 x i16> @test_x86_sse41_pmovzxb
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxbw:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x30,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxbw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x30,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxbw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxbw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x30,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -327,11 +386,17 @@ define <2 x i64> @test_x86_sse41_pmovzxd
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxdq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxdq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxdq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxdq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -345,11 +410,17 @@ define <4 x i32> @test_x86_sse41_pmovzxw
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxwd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x33,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxwd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x33,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxwd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxwd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -363,11 +434,17 @@ define <2 x i64> @test_x86_sse41_pmovzxw
 ; SSE-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmovzxwq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmovzxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x34,0xc0]
-; AVX-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmovzxwq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmovzxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x34,0xc0]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmovzxwq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmovzxwq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x34,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -379,10 +456,15 @@ define <16 x i8> @max_epi8(<16 x i8> %a0
 ; SSE-NEXT:    pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: max_epi8:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: max_epi8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epi8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
@@ -394,10 +476,15 @@ define <16 x i8> @min_epi8(<16 x i8> %a0
 ; SSE-NEXT:    pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: min_epi8:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: min_epi8:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epi8:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %res
 }
@@ -409,10 +496,15 @@ define <8 x i16> @max_epu16(<8 x i16> %a
 ; SSE-NEXT:    pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: max_epu16:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: max_epu16:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epu16:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
@@ -424,10 +516,15 @@ define <8 x i16> @min_epu16(<8 x i16> %a
 ; SSE-NEXT:    pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: min_epu16:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: min_epu16:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epu16:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %res
 }
@@ -439,10 +536,15 @@ define <4 x i32> @max_epi32(<4 x i32> %a
 ; SSE-NEXT:    pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: max_epi32:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: max_epi32:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epi32:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
@@ -454,10 +556,15 @@ define <4 x i32> @min_epi32(<4 x i32> %a
 ; SSE-NEXT:    pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: min_epi32:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: min_epi32:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epi32:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
@@ -469,10 +576,15 @@ define <4 x i32> @max_epu32(<4 x i32> %a
 ; SSE-NEXT:    pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: max_epu32:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: max_epu32:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: max_epu32:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
@@ -484,10 +596,15 @@ define <4 x i32> @min_epu32(<4 x i32> %a
 ; SSE-NEXT:    pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: min_epu32:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: min_epu32:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: min_epu32:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
   ret <4 x i32> %res
 }
@@ -500,10 +617,15 @@ define <2 x i64> @test_x86_sse41_pmuldq(
 ; SSE-NEXT:    pmuldq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x28,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmuldq:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmuldq:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmuldq:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x28,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }

Modified: llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-intrinsics-x86.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
 ; SSE-LABEL: test_x86_sse41_blendvpd:
@@ -123,10 +123,15 @@ define <8 x i16> @test_x86_sse41_packusd
 ; SSE-NEXT:    packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_packusdw:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_packusdw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_packusdw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -141,12 +146,19 @@ define <8 x i16> @test_x86_sse41_packusd
 ; X86-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: test_x86_sse41_packusdw_fold:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: test_x86_sse41_packusdw_fold:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X86-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse41_packusdw_fold:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vmovaps LCPI7_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
+; X86-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_x86_sse41_packusdw_fold:
 ; X64-SSE:       ## %bb.0:
@@ -155,12 +167,19 @@ define <8 x i16> @test_x86_sse41_packusd
 ; X64-SSE-NEXT:    ## fixup A - offset: 3, value: LCPI7_0-4, kind: reloc_riprel_4byte
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: test_x86_sse41_packusdw_fold:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI7_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: test_x86_sse41_packusdw_fold:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X64-AVX1-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: LCPI7_0-4, kind: reloc_riprel_4byte
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse41_packusdw_fold:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovaps {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
+; X64-AVX512-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI7_0-4, kind: reloc_riprel_4byte
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
   ret <8 x i16> %res
 }
@@ -207,10 +226,15 @@ define <16 x i8> @test_x86_sse41_pmaxsb(
 ; SSE-NEXT:    pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmaxsb:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmaxsb:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmaxsb:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -223,10 +247,15 @@ define <4 x i32> @test_x86_sse41_pmaxsd(
 ; SSE-NEXT:    pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmaxsd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmaxsd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmaxsd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -239,10 +268,15 @@ define <4 x i32> @test_x86_sse41_pmaxud(
 ; SSE-NEXT:    pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmaxud:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmaxud:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmaxud:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -255,10 +289,15 @@ define <8 x i16> @test_x86_sse41_pmaxuw(
 ; SSE-NEXT:    pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pmaxuw:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pmaxuw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pmaxuw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -271,10 +310,15 @@ define <16 x i8> @test_x86_sse41_pminsb(
 ; SSE-NEXT:    pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pminsb:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pminsb:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pminsb:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -287,10 +331,15 @@ define <4 x i32> @test_x86_sse41_pminsd(
 ; SSE-NEXT:    pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pminsd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pminsd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pminsd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x39,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -303,10 +352,15 @@ define <4 x i32> @test_x86_sse41_pminud(
 ; SSE-NEXT:    pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pminud:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pminud:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pminud:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -319,10 +373,15 @@ define <8 x i16> @test_x86_sse41_pminuw(
 ; SSE-NEXT:    pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_pminuw:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_pminuw:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_pminuw:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -395,10 +454,15 @@ define <2 x double> @test_x86_sse41_roun
 ; SSE-NEXT:    roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_round_pd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_round_pd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_round_pd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vroundpd $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -411,10 +475,15 @@ define <4 x float> @test_x86_sse41_round
 ; SSE-NEXT:    roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse41_round_ps:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse41_round_ps:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_round_ps:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vroundps $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }

Modified: llvm/trunk/test/CodeGen/X86/sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 @g16 = external global i16
 
@@ -14,20 +14,30 @@ define <4 x i32> @pinsrd_1(i32 %s, <4 x
 ; X86-SSE-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x44,0x24,0x04,0x01]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: pinsrd_1:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: pinsrd_1:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: pinsrd_1:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: pinsrd_1:
 ; X64-SSE:       ## %bb.0:
 ; X64-SSE-NEXT:    pinsrd $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0xc7,0x01]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: pinsrd_1:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: pinsrd_1:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: pinsrd_1:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
   ret <4 x i32> %tmp1
 }
@@ -38,20 +48,30 @@ define <16 x i8> @pinsrb_1(i8 %s, <16 x
 ; X86-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0x44,0x24,0x04,0x01]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: pinsrb_1:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: pinsrb_1:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: pinsrb_1:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: pinsrb_1:
 ; X64-SSE:       ## %bb.0:
 ; X64-SSE-NEXT:    pinsrb $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0xc7,0x01]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: pinsrb_1:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpinsrb $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: pinsrb_1:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpinsrb $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: pinsrb_1:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpinsrb $1, %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
   ret <16 x i8> %tmp1
 }
@@ -65,13 +85,21 @@ define <2 x i64> @pmovzxbq_1() nounwind
 ; X86-SSE-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: pmovzxbq_1:
-; X86-AVX:       ## %bb.0: ## %entry
-; X86-AVX-NEXT:    movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
-; X86-AVX-NEXT:    vpmovzxbq (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
-; X86-AVX-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: pmovzxbq_1:
+; X86-AVX1:       ## %bb.0: ## %entry
+; X86-AVX1-NEXT:    movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
+; X86-AVX1-NEXT:    ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
+; X86-AVX1-NEXT:    vpmovzxbq (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X86-AVX1-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: pmovzxbq_1:
+; X86-AVX512:       ## %bb.0: ## %entry
+; X86-AVX512-NEXT:    movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
+; X86-AVX512-NEXT:    ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
+; X86-AVX512-NEXT:    vpmovzxbq (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X86-AVX512-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: pmovzxbq_1:
 ; X64-SSE:       ## %bb.0: ## %entry
@@ -81,13 +109,21 @@ define <2 x i64> @pmovzxbq_1() nounwind
 ; X64-SSE-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: pmovzxbq_1:
-; X64-AVX:       ## %bb.0: ## %entry
-; X64-AVX-NEXT:    movq _g16@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
-; X64-AVX-NEXT:    vpmovzxbq (%rax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
-; X64-AVX-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: pmovzxbq_1:
+; X64-AVX1:       ## %bb.0: ## %entry
+; X64-AVX1-NEXT:    movq _g16@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
+; X64-AVX1-NEXT:    ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
+; X64-AVX1-NEXT:    vpmovzxbq (%rax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X64-AVX1-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: pmovzxbq_1:
+; X64-AVX512:       ## %bb.0: ## %entry
+; X64-AVX512-NEXT:    movq _g16@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
+; X64-AVX512-NEXT:    ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
+; X64-AVX512-NEXT:    vpmovzxbq (%rax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0x00]
+; X64-AVX512-NEXT:    ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
 entry:
 	%0 = load i16, i16* @g16, align 2		; <i16> [#uses=1]
 	%1 = insertelement <8 x i16> undef, i16 %0, i32 0		; <<8 x i16>> [#uses=1]
@@ -170,7 +206,7 @@ define float @ext_1(<4 x float> %v) noun
 ; X86-AVX512-LABEL: ext_1:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    pushl %eax ## encoding: [0x50]
-; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
 ; X86-AVX512-NEXT:    vaddss LCPI5_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
 ; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
@@ -197,7 +233,7 @@ define float @ext_1(<4 x float> %v) noun
 ;
 ; X64-AVX512-LABEL: ext_1:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
 ; X64-AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
 ; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
@@ -231,7 +267,7 @@ define float @ext_2(<4 x float> %v) noun
 ; X86-AVX512-LABEL: ext_2:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    pushl %eax ## encoding: [0x50]
-; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X86-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
 ; X86-AVX512-NEXT:    vmovss %xmm0, (%esp) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
 ; X86-AVX512-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
@@ -244,11 +280,17 @@ define float @ext_2(<4 x float> %v) noun
 ; X64-SSE-NEXT:    ## xmm0 = xmm0[3,1,2,3]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: ext_2:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
-; X64-AVX-NEXT:    ## xmm0 = xmm0[3,1,2,3]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: ext_2:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: ext_2:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %s = extractelement <4 x float> %v, i32 3
   ret float %s
 }
@@ -259,10 +301,15 @@ define i32 @ext_3(<4 x i32> %v) nounwind
 ; SSE-NEXT:    extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: ext_3:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: ext_3:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: ext_3:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %i = extractelement <4 x i32> %v, i32 3
   ret i32 %i
 }
@@ -500,9 +547,9 @@ define <2 x float> @buildvector(<2 x flo
 ;
 ; AVX512-LABEL: buildvector:
 ; AVX512:       ## %bb.0: ## %entry
-; AVX512-NEXT:    vmovshdup %xmm0, %xmm2 ## encoding: [0xc5,0xfa,0x16,0xd0]
+; AVX512-NEXT:    vmovshdup %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x16,0xd0]
 ; AVX512-NEXT:    ## xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmovshdup %xmm1, %xmm3 ## encoding: [0xc5,0xfa,0x16,0xd9]
+; AVX512-NEXT:    vmovshdup %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x16,0xd9]
 ; AVX512-NEXT:    ## xmm3 = xmm1[1,1,3,3]
 ; AVX512-NEXT:    vaddss %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x58,0xd3]
 ; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0xc1]
@@ -541,7 +588,7 @@ define <4 x float> @insertps_from_shuffl
 ; X86-AVX512-LABEL: insertps_from_shufflevector_1:
 ; X86-AVX512:       ## %bb.0: ## %entry
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -562,7 +609,7 @@ define <4 x float> @insertps_from_shuffl
 ;
 ; X64-AVX512-LABEL: insertps_from_shufflevector_1:
 ; X64-AVX512:       ## %bb.0: ## %entry
-; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -619,7 +666,7 @@ define <4 x i32> @pinsrd_from_shufflevec
 ; X86-AVX512-LABEL: pinsrd_from_shufflevector_i32:
 ; X86-AVX512:       ## %bb.0: ## %entry
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x08]
+; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x08]
 ; X86-AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -642,7 +689,7 @@ define <4 x i32> @pinsrd_from_shufflevec
 ;
 ; X64-AVX512-LABEL: pinsrd_from_shufflevector_i32:
 ; X64-AVX512:       ## %bb.0: ## %entry
-; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0f]
+; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x0f]
 ; X64-AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -661,13 +708,21 @@ define <4 x i32> @insertps_from_shufflev
 ; SSE-NEXT:    ## xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: insertps_from_shufflevector_i32_2:
-; AVX:       ## %bb.0: ## %entry
-; AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
-; AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
-; AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
-; AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: insertps_from_shufflevector_i32_2:
+; AVX1:       ## %bb.0: ## %entry
+; AVX1-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; AVX1-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: insertps_from_shufflevector_i32_2:
+; AVX512:       ## %bb.0: ## %entry
+; AVX512-NEXT:    vpermilps $78, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; AVX512-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; AVX512-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 entry:
   %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
   ret <4 x i32> %vecinit6
@@ -726,21 +781,32 @@ define <4 x i32> @insertps_from_load_ins
 ; X86-SSE-NEXT:    pinsrd $2, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x00,0x02]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vpinsrd $2, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vpinsrd $2, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vpinsrd $2, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: insertps_from_load_ins_elt_undef_i32:
 ; X64-SSE:       ## %bb.0:
 ; X64-SSE-NEXT:    pinsrd $2, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x07,0x02]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpinsrd $2, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpinsrd $2, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpinsrd $2, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load i32, i32* %b, align 4
   %2 = insertelement <4 x i32> undef, i32 %1, i32 0
   %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
@@ -756,12 +822,19 @@ define <4 x float> @shuf_XYZ0(<4 x float
 ; SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: shuf_XYZ0:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: shuf_XYZ0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: shuf_XYZ0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -953,8 +1026,8 @@ define <4 x float> @shuf_X0YC(<4 x float
 ;
 ; AVX512-LABEL: shuf_X0YC:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
-; AVX512-NEXT:    vunpcklps %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x14,0xc2]
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX512-NEXT:    vunpcklps %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x14,0xc2]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; AVX512-NEXT:    vinsertps $176, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb0]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[2]
@@ -975,12 +1048,19 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32
 ; SSE-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: i32_shuf_XYZ0:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: i32_shuf_XYZ0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_XYZ0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -1028,14 +1108,23 @@ define <4 x i32> @i32_shuf_XYY0(<4 x i32
 ; SSE-NEXT:    ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: i32_shuf_XYY0:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpermilps $212, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: i32_shuf_XYY0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpermilps $212, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,1,3]
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_XYY0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpermilps $212, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,1,3]
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -1055,14 +1144,23 @@ define <4 x i32> @i32_shuf_XYW0(<4 x i32
 ; SSE-NEXT:    ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: i32_shuf_XYW0:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpermilps $244, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,3,3]
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
-; AVX-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: i32_shuf_XYW0:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpermilps $244, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,3,3]
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_XYW0:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpermilps $244, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,3,3]
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
+; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecext1 = extractelement <4 x i32> %x, i32 1
@@ -1083,14 +1181,23 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32
 ; SSE-NEXT:    ## xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: i32_shuf_W00W:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
-; AVX-NEXT:    ## xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
-; AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: i32_shuf_W00W:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpermilps $231, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; AVX1-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
+; AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: i32_shuf_W00W:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpermilps $231, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xe7]
+; AVX512-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x i32> %x, i32 3
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
   %vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -1124,10 +1231,10 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32
 ;
 ; AVX512-LABEL: i32_shuf_X00A:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
 ; AVX512-NEXT:    vmovss %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x10,0xc0]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm2[1,2,3]
-; AVX512-NEXT:    vbroadcastss %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0xc9]
+; AVX512-NEXT:    vbroadcastss %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc9]
 ; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
 ; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
@@ -1160,8 +1267,8 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32
 ;
 ; AVX512-LABEL: i32_shuf_X00X:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x18,0xc0]
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
 ; AVX512-NEXT:    vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
 ; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
@@ -1196,9 +1303,9 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32
 ;
 ; AVX512-LABEL: i32_shuf_X0YC:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX512-NEXT:    vpmovzxdq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x35,0xc0]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX512-NEXT:    vpshufd $164, %xmm1, %xmm1 ## encoding: [0xc5,0xf9,0x70,0xc9,0xa4]
+; AVX512-NEXT:    vpshufd $164, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x70,0xc9,0xa4]
 ; AVX512-NEXT:    ## xmm1 = xmm1[0,1,2,2]
 ; AVX512-NEXT:    vpblendd $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x02,0xc1,0x08]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[3]
@@ -1221,13 +1328,21 @@ define < 4 x float> @test_insertps_no_un
 ; SSE-NEXT:    maxps %xmm1, %xmm0 ## encoding: [0x0f,0x5f,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_insertps_no_undef:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
-; AVX-NEXT:    ## xmm1 = xmm0[0,1,2],xmm1[3]
-; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_insertps_no_undef:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
+; AVX1-NEXT:    ## xmm1 = xmm0[0,1,2],xmm1[3]
+; AVX1-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_insertps_no_undef:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
+; AVX512-NEXT:    ## xmm1 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %x, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %x, i32 1
@@ -1249,12 +1364,19 @@ define <8 x i16> @blendvb_fallback(<8 x
 ; SSE-NEXT:    movdqa %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc2]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: blendvb_fallback:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vpsllw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x0f]
-; AVX-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x4c,0xc1,0x00]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: blendvb_fallback:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsllw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
+; AVX1-NEXT:    vpsraw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x0f]
+; AVX1-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x4c,0xc1,0x00]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: blendvb_fallback:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsllw $15, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
+; AVX512-NEXT:    vpmovw2m %xmm0, %k1 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc8]
+; AVX512-NEXT:    vpblendmw %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x66,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
   ret <8 x i16> %ret
 }
@@ -1280,7 +1402,7 @@ define <4 x float> @insertps_from_vector
 ; X86-AVX512-LABEL: insertps_from_vector_load:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1301,7 +1423,7 @@ define <4 x float> @insertps_from_vector
 ;
 ; X64-AVX512-LABEL: insertps_from_vector_load:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1332,7 +1454,7 @@ define <4 x float> @insertps_from_vector
 ; X86-AVX512-LABEL: insertps_from_vector_load_offset:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
 ; X86-AVX512-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1353,7 +1475,7 @@ define <4 x float> @insertps_from_vector
 ;
 ; X64-AVX512-LABEL: insertps_from_vector_load_offset:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
 ; X64-AVX512-NEXT:    vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1389,7 +1511,7 @@ define <4 x float> @insertps_from_vector
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
 ; X86-AVX512-NEXT:    shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
-; X86-AVX512-NEXT:    vmovaps (%eax,%ecx), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x08]
+; X86-AVX512-NEXT:    vmovaps (%eax,%ecx), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0c,0x08]
 ; X86-AVX512-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1413,7 +1535,7 @@ define <4 x float> @insertps_from_vector
 ; X64-AVX512-LABEL: insertps_from_vector_load_offset_2:
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
-; X64-AVX512-NEXT:    vmovaps (%rdi,%rsi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x37]
+; X64-AVX512-NEXT:    vmovaps (%rdi,%rsi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0c,0x37]
 ; X64-AVX512-NEXT:    vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm1[3],xmm0[1,2,3]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1444,7 +1566,7 @@ define <4 x float> @insertps_from_broadc
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0c,0x81]
+; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x0c,0x81]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1463,7 +1585,7 @@ define <4 x float> @insertps_from_broadc
 ;
 ; X64-AVX512-LABEL: insertps_from_broadcast_loadf32:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0c,0xb7]
+; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x0c,0xb7]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1496,7 +1618,7 @@ define <4 x float> @insertps_from_broadc
 ; X86-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x08]
+; X86-AVX512-NEXT:    vbroadcastss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x08]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1516,7 +1638,7 @@ define <4 x float> @insertps_from_broadc
 ;
 ; X64-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## encoding: [0xc4,0xe2,0x79,0x18,0x0f]
+; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x0f]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1572,18 +1694,18 @@ define <4 x float> @insertps_from_broadc
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
+; X86-AVX512-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm4 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
 ; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
 ; X86-AVX512-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
-; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xd4,0x30]
-; X86-AVX512-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
-; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xdc,0x30]
-; X86-AVX512-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
-; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
-; X86-AVX512-NEXT:    vaddps %xmm3, %xmm2, %xmm1 ## encoding: [0xc5,0xe8,0x58,0xcb]
-; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
+; X86-AVX512-NEXT:    ## xmm1 = xmm2[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
+; X86-AVX512-NEXT:    ## xmm2 = xmm3[0,1,2],xmm4[0]
+; X86-AVX512-NEXT:    vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
+; X86-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: insertps_from_broadcast_multiple_use:
@@ -1621,18 +1743,18 @@ define <4 x float> @insertps_from_broadc
 ;
 ; X64-AVX512-LABEL: insertps_from_broadcast_multiple_use:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
+; X64-AVX512-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm4 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0,1,2],xmm4[0]
 ; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
 ; X64-AVX512-NEXT:    ## xmm1 = xmm1[0,1,2],xmm4[0]
-; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xd4,0x30]
-; X64-AVX512-NEXT:    ## xmm2 = xmm2[0,1,2],xmm4[0]
-; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xdc,0x30]
-; X64-AVX512-NEXT:    ## xmm3 = xmm3[0,1,2],xmm4[0]
-; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
-; X64-AVX512-NEXT:    vaddps %xmm3, %xmm2, %xmm1 ## encoding: [0xc5,0xe8,0x58,0xcb]
-; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
+; X64-AVX512-NEXT:    ## xmm1 = xmm2[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vinsertps $48, %xmm4, %xmm3, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
+; X64-AVX512-NEXT:    ## xmm2 = xmm3[0,1,2],xmm4[0]
+; X64-AVX512-NEXT:    vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
+; X64-AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = getelementptr inbounds float, float* %fb, i64 %index
   %2 = load float, float* %1, align 4
@@ -1675,7 +1797,7 @@ define <4 x float> @insertps_with_undefs
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512-NEXT:    vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
 ; X86-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X86-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
@@ -1700,7 +1822,7 @@ define <4 x float> @insertps_with_undefs
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
 ; X64-AVX512-NEXT:    ## xmm1 = mem[0],zero,zero,zero
-; X64-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
+; X64-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load float, float* %b, align 4
@@ -1731,7 +1853,7 @@ define <4 x float> @pr20087(<4 x float>
 ; X86-AVX512-LABEL: pr20087:
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
+; X86-AVX512-NEXT:    vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
 ; X86-AVX512-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
 ; X86-AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
@@ -1752,7 +1874,7 @@ define <4 x float> @pr20087(<4 x float>
 ;
 ; X64-AVX512-LABEL: pr20087:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
+; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
 ; X64-AVX512-NEXT:    vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
 ; X64-AVX512-NEXT:    ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
@@ -1773,15 +1895,25 @@ define void @insertps_pr20411(<4 x i32>
 ; X86-SSE-NEXT:    movdqu %xmm1, (%eax) ## encoding: [0xf3,0x0f,0x7f,0x08]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: insertps_pr20411:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
-; X86-AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
-; X86-AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
-; X86-AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; X86-AVX-NEXT:    vmovups %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x11,0x00]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: insertps_pr20411:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X86-AVX1-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X86-AVX1-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X86-AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X86-AVX1-NEXT:    vmovups %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x11,0x00]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: insertps_pr20411:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vpermilps $78, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X86-AVX512-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X86-AVX512-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X86-AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X86-AVX512-NEXT:    vmovups %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: insertps_pr20411:
 ; X64-SSE:       ## %bb.0:
@@ -1792,14 +1924,23 @@ define void @insertps_pr20411(<4 x i32>
 ; X64-SSE-NEXT:    movdqu %xmm1, (%rdi) ## encoding: [0xf3,0x0f,0x7f,0x0f]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: insertps_pr20411:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
-; X64-AVX-NEXT:    ## xmm1 = xmm1[2,3,0,1]
-; X64-AVX-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
-; X64-AVX-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; X64-AVX-NEXT:    vmovups %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x11,0x07]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: insertps_pr20411:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vpermilps $78, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X64-AVX1-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X64-AVX1-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X64-AVX1-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X64-AVX1-NEXT:    vmovups %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x11,0x07]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: insertps_pr20411:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vpermilps $78, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x4e]
+; X64-AVX512-NEXT:    ## xmm1 = xmm1[2,3,0,1]
+; X64-AVX512-NEXT:    vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
+; X64-AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; X64-AVX512-NEXT:    vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 0, i32 7, i32 undef, i32 undef>
   %ptrcast = bitcast i32* %RET to <4 x i32>*
   store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
@@ -1848,8 +1989,10 @@ define <4 x float> @insertps_5(<4 x floa
 ;
 ; AVX512-LABEL: insertps_5:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vinsertps $92, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x5c]
-; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],zero,zero
+; AVX512-NEXT:    vpblendd $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x02,0xc1,0x02]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512-NEXT:    vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero
 ; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -1930,6 +2073,8 @@ define <4 x float> @insertps_8(<4 x floa
 ; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vinsertps $28, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x1c]
 ; AVX512-NEXT:    ## xmm0 = xmm0[0],xmm1[0],zero,zero
+; AVX512-NEXT:    vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
+; AVX512-NEXT:    ## xmm0 = xmm0[0],zero
 ; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -1999,12 +2144,19 @@ define <4 x float> @build_vector_to_shuf
 ; SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: build_vector_to_shuffle_1:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
-; AVX-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: build_vector_to_shuffle_1:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
+; AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: build_vector_to_shuffle_1:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
+; AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
@@ -2020,12 +2172,19 @@ define <4 x float> @build_vector_to_shuf
 ; SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: build_vector_to_shuffle_2:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX-NEXT:    vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
-; AVX-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: build_vector_to_shuffle_2:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX1-NEXT:    vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
+; AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: build_vector_to_shuffle_2:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
+; AVX512-NEXT:    vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
+; AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll Sat Jun  2 11:01:09 2018
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c
 

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c
 
@@ -344,10 +344,16 @@ define <2 x i64> @test_mm_cmpgt_epi64(<2
 ; SSE-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX-LABEL: test_mm_cmpgt_epi64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX1-LABEL: test_mm_cmpgt_epi64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX512-LABEL: test_mm_cmpgt_epi64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT:    vpmovm2q %k0, %xmm0
+; AVX512-NEXT:    ret{{[l|q]}}
   %cmp = icmp sgt <2 x i64> %a0, %a1
   %res = sext <2 x i1> %cmp to <2 x i64>
   ret <2 x i64> %res

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll Sat Jun  2 11:01:09 2018
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
 ; SSE-LABEL: test_x86_sse42_pcmpestri128:
@@ -40,16 +40,27 @@ define i32 @test_x86_sse42_pcmpestri128_
 ; X86-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
-; X86-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; X86-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; X86-AVX-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
-; X86-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: test_x86_sse42_pcmpestri128_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX1-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX1-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX1-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
+; X86-AVX1-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse42_pcmpestri128_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512-NEXT:    vmovdqa (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX512-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X86-AVX512-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X86-AVX512-NEXT:    vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
+; X86-AVX512-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_x86_sse42_pcmpestri128_load:
 ; X64-SSE:       ## %bb.0:
@@ -60,14 +71,23 @@ define i32 @test_x86_sse42_pcmpestri128_
 ; X64-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
-; X64-AVX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; X64-AVX-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; X64-AVX-NEXT:    vpcmpestri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x06,0x07]
-; X64-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: test_x86_sse42_pcmpestri128_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX1-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX1-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX1-NEXT:    vpcmpestri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x06,0x07]
+; X64-AVX1-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse42_pcmpestri128_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovdqa (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX512-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
+; X64-AVX512-NEXT:    movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; X64-AVX512-NEXT:    vpcmpestri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x06,0x07]
+; X64-AVX512-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a0
   %2 = load <16 x i8>, <16 x i8>* %a2
   %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
@@ -409,14 +429,23 @@ define i32 @test_x86_sse42_pcmpistri128_
 ; X86-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
 ; X86-SSE-NEXT:    retl ## encoding: [0xc3]
 ;
-; X86-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX-NEXT:    vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
-; X86-AVX-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
-; X86-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX1-LABEL: test_x86_sse42_pcmpistri128_load:
+; X86-AVX1:       ## %bb.0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX1-NEXT:    vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
+; X86-AVX1-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
+; X86-AVX1-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
+;
+; X86-AVX512-LABEL: test_x86_sse42_pcmpistri128_load:
+; X86-AVX512:       ## %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX512-NEXT:    vmovdqa (%ecx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x01]
+; X86-AVX512-NEXT:    vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
+; X86-AVX512-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_x86_sse42_pcmpistri128_load:
 ; X64-SSE:       ## %bb.0:
@@ -425,12 +454,19 @@ define i32 @test_x86_sse42_pcmpistri128_
 ; X64-SSE-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
-; X64-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
-; X64-AVX-NEXT:    vpcmpistri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x06,0x07]
-; X64-AVX-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX1-LABEL: test_x86_sse42_pcmpistri128_load:
+; X64-AVX1:       ## %bb.0:
+; X64-AVX1-NEXT:    vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX1-NEXT:    vpcmpistri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x06,0x07]
+; X64-AVX1-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
+;
+; X64-AVX512-LABEL: test_x86_sse42_pcmpistri128_load:
+; X64-AVX512:       ## %bb.0:
+; X64-AVX512-NEXT:    vmovdqa (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX512-NEXT:    vpcmpistri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x06,0x07]
+; X64-AVX512-NEXT:    movl %ecx, %eax ## encoding: [0x89,0xc8]
+; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %1 = load <16 x i8>, <16 x i8>* %a0
   %2 = load <16 x i8>, <16 x i8>* %a1
   %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll?rev=333830&r1=333829&r2=333830&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86_64.ll Sat Jun  2 11:01:09 2018
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
 declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
