[llvm] r333834 - [X86][SSE] Cleanup AVX1 intrinsics tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 2 14:35:48 PDT 2018


Author: rksimon
Date: Sat Jun  2 14:35:48 2018
New Revision: 333834

URL: http://llvm.org/viewvc/llvm-project?rev=333834&view=rev
Log:
[X86][SSE] Cleanup AVX1 intrinsics tests

Ensure we cover 32-bit and 64-bit targets for the SSE/AVX/AVX512 cases as necessary, and strip -mcpu usage in favor of explicit -mattr feature lists.
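
For reference, the cleaned-up RUN lines pair each triple (i686 and x86_64)
with each feature set (+avx, or the +avx512f,+avx512bw,+avx512dq,+avx512vl
group) and share FileCheck prefixes along both axes: CHECK for all four runs,
X86/X64 per triple, AVX/AVX512VL per feature set, and X86-AVX etc. for the
fully specific combinations. A minimal sketch of that layout, using a
hypothetical test function rather than one of the files below (the CHECK
bodies would be regenerated with utils/update_llc_test_checks.py):

  ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
  ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX512VL,X86-AVX512VL
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX512VL,X64-AVX512VL

  ; A trivial function: all four runs emit the same vaddps, but with
  ; -show-mc-encoding the AVX512VL runs annotate it as an EVEX TO VEX
  ; Compression, so the autogenerated checks split between the AVX and
  ; AVX512VL prefixes rather than sharing a single CHECK block.
  define <4 x float> @hypothetical_fadd(<4 x float> %a0, <4 x float> %a1) {
    %res = fadd <4 x float> %a0, %a1
    ret <4 x float> %res
  }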

Modified:
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=333834&r1=333833&r2=333834&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Sat Jun  2 14:35:48 2018
@@ -1,34 +1,51 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX512VL,X86-AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX512VL,X64-AVX512VL
 
 ; We don't check any vinsertf128 variant with immediate 0 because that's just a blend.
 
 define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vinsertf128_pd_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vinsertf128_pd_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
   ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
 
 define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vinsertf128_ps_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vinsertf128_ps_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
   ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
 
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vinsertf128_si_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vinsertf128_si_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
   ret <8 x i32> %res
 }
@@ -40,8 +57,9 @@ define <8 x i32> @test_x86_avx_vinsertf1
 ; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vblendps $240, %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe3,0x75,0x0c,0xc0,0xf0]
+; CHECK-NEXT:    # ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
   ret <8 x i32> %res
 }
@@ -50,33 +68,51 @@ declare <8 x i32> @llvm.x86.avx.vinsertf
 ; We don't check any vextractf128 variant with immediate 0 because that's just a move.
 
 define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) {
-; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vextractf128_pd_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vextractf128_pd_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
 
 define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) {
-; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vextractf128_ps_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vextractf128_ps_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a0, i8 1)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
 
 define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) {
-; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vextractf128_si_256_1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vextractf128_si_256_1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %a0, i8 1)
   ret <4 x i32> %res
 }
@@ -89,24 +125,39 @@ define <2 x double> @test_x86_avx_extrac
 ; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
   ret <2 x double> %res
 }
 
 
 define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
-; X86-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vbroadcastf128 (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1a,0x00]
+; X86-AVX-NEXT:    # ymm0 = mem[0,1,0,1]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vbroadcastf128 (%eax), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1a,0x00]
+; X86-AVX512VL-NEXT:    # ymm0 = mem[0,1,0,1]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastf128 (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1a,0x07]
+; X64-AVX-NEXT:    # ymm0 = mem[0,1,0,1]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vbroadcastf128 (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1a,0x07]
+; X64-AVX512VL-NEXT:    # ymm0 = mem[0,1,0,1]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -114,16 +165,31 @@ declare <4 x double> @llvm.x86.avx.vbroa
 
 
 define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
-; X86-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vbroadcastf128 (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1a,0x00]
+; X86-AVX-NEXT:    # ymm0 = mem[0,1,0,1]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vbroadcastf128 (%eax), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1a,0x00]
+; X86-AVX512VL-NEXT:    # ymm0 = mem[0,1,0,1]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastf128 (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1a,0x07]
+; X64-AVX-NEXT:    # ymm0 = mem[0,1,0,1]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vbroadcastf128 (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1a,0x07]
+; X64-AVX512VL-NEXT:    # ymm0 = mem[0,1,0,1]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -133,8 +199,9 @@ declare <8 x float> @llvm.x86.avx.vbroad
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_blend_pd_256:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vblendps $192, %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe3,0x75,0x0c,0xc0,0xc0]
+; CHECK-NEXT:    # ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -144,8 +211,9 @@ declare <4 x double> @llvm.x86.avx.blend
 define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_blend_ps_256:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vblendps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
+; CHECK-NEXT:    # ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -155,8 +223,8 @@ declare <8 x float> @llvm.x86.avx.blend.
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_dp_ps_256:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -164,10 +232,17 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse2_psll_dq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslldq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf8,0x01]
+; AVX-NEXT:    # xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse2_psll_dq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpslldq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x01]
+; AVX512VL-NEXT:    # xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -175,10 +250,17 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse2_psrl_dq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrldq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd8,0x01]
+; AVX-NEXT:    # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse2_psrl_dq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrldq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x01]
+; AVX512VL-NEXT:    # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -186,10 +268,17 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_blendpd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendps $3, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
+; AVX-NEXT:    # xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_blendpd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovsd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf3,0x10,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],xmm1[1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -199,8 +288,9 @@ declare <2 x double> @llvm.x86.sse41.ble
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_sse41_blendps:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vblendps $8, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
+; CHECK-NEXT:    # xmm0 = xmm1[0,1,2],xmm0[3]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -210,8 +300,9 @@ declare <4 x float> @llvm.x86.sse41.blen
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: test_x86_sse41_pblendw:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
-; CHECK-NEXT:    ret{{[l|q]}}
+; CHECK-NEXT:    vpblendw $7, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0e,0xc1,0x07]
+; CHECK-NEXT:    # xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -219,10 +310,15 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxbd %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxbd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbd %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxbd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxbd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -230,10 +326,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxbq %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxbq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxbq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxbq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -241,10 +342,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxbw %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxbw:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbw %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxbw:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxbw %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -252,10 +358,15 @@ declare <8 x i16> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxdq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxdq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxdq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -263,10 +374,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxwd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxwd %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxwd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -274,10 +390,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovsxwq %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovsxwq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxwq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovsxwq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovsxwq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -285,10 +406,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 
 define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxbd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbd %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x31,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxbd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x31,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -296,10 +424,17 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxbq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x32,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxbq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -307,10 +442,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 
 define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxbw:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxbw:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxbw %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x30,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxbw:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbw %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x30,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -318,10 +460,17 @@ declare <8 x i16> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxdq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxdq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxdq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxdq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxdq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x35,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -329,10 +478,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 
 define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxwd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxwd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwd %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x33,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxwd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -340,10 +496,17 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 
 define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovzxwq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse41_pmovzxwq:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwq %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x34,0xc0]
+; AVX-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse41_pmovzxwq:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x34,0xc0]
+; AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -351,10 +514,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 
 define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse2_cvtdq2pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0 # encoding: [0xc5,0xfa,0xe6,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse2_cvtdq2pd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvtdq2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -362,10 +530,15 @@ declare <2 x double> @llvm.x86.sse2.cvtd
 
 
 define <4 x double> @test_x86_avx_cvtdq2_pd_256(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_avx_cvtdq2_pd_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_cvtdq2_pd_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0 # encoding: [0xc5,0xfe,0xe6,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_cvtdq2_pd_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvtdq2pd %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfe,0xe6,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -373,10 +546,15 @@ declare <4 x double> @llvm.x86.avx.cvtdq
 
 
 define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcvtps2pd %xmm0, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_sse2_cvtps2pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_sse2_cvtps2pd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -384,10 +562,15 @@ declare <2 x double> @llvm.x86.sse2.cvtp
 
 
 define <4 x double> @test_x86_avx_cvt_ps2_pd_256(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx_cvt_ps2_pd_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcvtps2pd %xmm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_cvt_ps2_pd_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0 # encoding: [0xc5,0xfc,0x5a,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_cvt_ps2_pd_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvtps2pd %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -396,20 +579,35 @@ declare <4 x double> @llvm.x86.avx.cvt.p
 
 define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
   ; add operation forces the execution domain.
-; X86-LABEL: test_x86_sse2_storeu_dq:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
-; X86-NEXT:    vmovdqu %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_sse2_storeu_dq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vmovdqu %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_sse2_storeu_dq:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
+; X86-AVX-NEXT:    vmovdqu %xmm0, (%eax) # encoding: [0xc5,0xfa,0x7f,0x00]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_sse2_storeu_dq:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX512VL-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; X86-AVX512VL-NEXT:    vmovdqu %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x00]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse2_storeu_dq:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
+; X64-AVX-NEXT:    vmovdqu %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x7f,0x07]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_sse2_storeu_dq:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX512VL-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; X64-AVX512VL-NEXT:    vmovdqu %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x07]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
   ret void
@@ -419,22 +617,49 @@ declare void @llvm.x86.sse2.storeu.dq(i8
 
 define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
   ; fadd operation forces the execution domain.
-; X86-LABEL: test_x86_sse2_storeu_pd:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; X86-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
-; X86-NEXT:    vmovupd %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_sse2_storeu_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; X64-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vmovupd %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_sse2_storeu_pd:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX-NEXT:    vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X86-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
+; X86-AVX-NEXT:    vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_sse2_storeu_pd:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vmovsd {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    # xmm1 = mem[0],zero
+; X86-AVX512VL-NEXT:    vpslldq $8, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x73,0xf9,0x08]
+; X86-AVX512VL-NEXT:    # xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X86-AVX512VL-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
+; X86-AVX512VL-NEXT:    vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse2_storeu_pd:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX-NEXT:    vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
+; X64-AVX-NEXT:    vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_sse2_storeu_pd:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovsd {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0d,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    # xmm1 = mem[0],zero
+; X64-AVX512VL-NEXT:    vpslldq $8, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x73,0xf9,0x08]
+; X64-AVX512VL-NEXT:    # xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-AVX512VL-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
+; X64-AVX512VL-NEXT:    vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a2 = fadd <2 x double> %a1, <double 0x0, double 0x4200000000000000>
   call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
   ret void
@@ -443,16 +668,27 @@ declare void @llvm.x86.sse2.storeu.pd(i8
 
 
 define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
-; X86-LABEL: test_x86_sse_storeu_ps:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vmovups %xmm0, (%eax)
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_sse_storeu_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovups %xmm0, (%rdi)
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_sse_storeu_ps:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_sse_storeu_ps:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse_storeu_ps:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x11,0x07]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_sse_storeu_ps:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
   ret void
 }
@@ -462,28 +698,45 @@ declare void @llvm.x86.sse.storeu.ps(i8*
 define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
   ; FIXME: unfortunately the execution domain fix pass changes this to vmovups and its hard to force with no 256-bit integer instructions
   ; add operation forces the execution domain.
-; X86-LABEL: test_x86_avx_storeu_dq_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; X86-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
-; X86-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
-; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X86-NEXT:    vmovups %ymm0, (%eax)
-; X86-NEXT:    vzeroupper
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_avx_storeu_dq_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; X64-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
-; X64-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT:    vmovups %ymm0, (%rdi)
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_avx_storeu_dq_256:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1 # encoding: [0xc4,0xe3,0x7d,0x19,0xc1,0x01]
+; X86-AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
+; X86-AVX-NEXT:    vpsubb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xf8,0xca]
+; X86-AVX-NEXT:    vpsubb %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc2]
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; X86-AVX-NEXT:    vmovups %ymm0, (%eax) # encoding: [0xc5,0xfc,0x11,0x00]
+; X86-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx_storeu_dq_256:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1 # encoding: [0xc5,0xf5,0x76,0xc9]
+; X86-AVX512VL-NEXT:    vpsubb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf8,0xc1]
+; X86-AVX512VL-NEXT:    vmovdqu %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x00]
+; X86-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_storeu_dq_256:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1 # encoding: [0xc4,0xe3,0x7d,0x19,0xc1,0x01]
+; X64-AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
+; X64-AVX-NEXT:    vpsubb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xf8,0xca]
+; X64-AVX-NEXT:    vpsubb %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc2]
+; X64-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; X64-AVX-NEXT:    vmovups %ymm0, (%rdi) # encoding: [0xc5,0xfc,0x11,0x07]
+; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_storeu_dq_256:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1 # encoding: [0xc5,0xf5,0x76,0xc9]
+; X64-AVX512VL-NEXT:    vpsubb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf8,0xc1]
+; X64-AVX512VL-NEXT:    vmovdqu %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x07]
+; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a2 = add <32 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
   ret void
@@ -493,22 +746,39 @@ declare void @llvm.x86.avx.storeu.dq.256
 
 define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
   ; add operation forces the execution domain.
-; X86-LABEL: test_x86_avx_storeu_pd_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X86-NEXT:    vmovupd %ymm0, (%eax)
-; X86-NEXT:    vzeroupper
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_avx_storeu_pd_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vmovupd %ymm0, (%rdi)
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_avx_storeu_pd_256:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
+; X86-AVX-NEXT:    vmovupd %ymm0, (%eax) # encoding: [0xc5,0xfd,0x11,0x00]
+; X86-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx_storeu_pd_256:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
+; X86-AVX512VL-NEXT:    vmovupd %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x11,0x00]
+; X86-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_storeu_pd_256:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
+; X64-AVX-NEXT:    vmovupd %ymm0, (%rdi) # encoding: [0xc5,0xfd,0x11,0x07]
+; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_storeu_pd_256:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
+; X64-AVX512VL-NEXT:    vmovupd %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x11,0x07]
+; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a2 = fadd <4 x double> %a1, <double 0x0, double 0x0, double 0x0, double 0x0>
   call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
   ret void
@@ -517,18 +787,31 @@ declare void @llvm.x86.avx.storeu.pd.256
 
 
 define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
-; X86-LABEL: test_x86_avx_storeu_ps_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vmovups %ymm0, (%eax)
-; X86-NEXT:    vzeroupper
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_x86_avx_storeu_ps_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovups %ymm0, (%rdi)
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; X86-AVX-LABEL: test_x86_avx_storeu_ps_256:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovups %ymm0, (%eax) # encoding: [0xc5,0xfc,0x11,0x00]
+; X86-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx_storeu_ps_256:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vmovups %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x00]
+; X86-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_storeu_ps_256:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovups %ymm0, (%rdi) # encoding: [0xc5,0xfc,0x11,0x07]
+; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_storeu_ps_256:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovups %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
+; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.avx.storeu.ps.256(i8* %a0, <8 x float> %a1)
   ret void
 }
@@ -536,10 +819,17 @@ declare void @llvm.x86.avx.storeu.ps.256
 
 
 define <2 x double> @test_x86_avx_vpermil_pd(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_avx_vpermil_pd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vpermil_pd:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd $1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
+; AVX-NEXT:    # xmm0 = xmm0[1,0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vpermil_pd:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermilpd $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xc0,0x01]
+; AVX512VL-NEXT:    # xmm0 = xmm0[1,0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double> %a0, i8 1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -547,10 +837,17 @@ declare <2 x double> @llvm.x86.avx.vperm
 
 
 define <4 x double> @test_x86_avx_vpermil_pd_256(<4 x double> %a0) {
-; CHECK-LABEL: test_x86_avx_vpermil_pd_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vpermil_pd_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x07]
+; AVX-NEXT:    # ymm0 = ymm0[1,1,3,2]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vpermil_pd_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermilpd $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x07]
+; AVX512VL-NEXT:    # ymm0 = ymm0[1,1,3,2]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double> %a0, i8 7) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -558,10 +855,17 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 
 define <4 x float> @test_x86_avx_vpermil_ps(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx_vpermil_ps:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vpermil_ps:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilps $7, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x07]
+; AVX-NEXT:    # xmm0 = xmm0[3,1,0,0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vpermil_ps:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermilps $7, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x07]
+; AVX512VL-NEXT:    # xmm0 = xmm0[3,1,0,0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float> %a0, i8 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -569,10 +873,17 @@ declare <4 x float> @llvm.x86.avx.vpermi
 
 
 define <8 x float> @test_x86_avx_vpermil_ps_256(<8 x float> %a0) {
-; CHECK-LABEL: test_x86_avx_vpermil_ps_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vpermil_ps_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilps $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x04,0xc0,0x07]
+; AVX-NEXT:    # ymm0 = ymm0[3,1,0,0,7,5,4,4]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vpermil_ps_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermilps $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x04,0xc0,0x07]
+; AVX512VL-NEXT:    # ymm0 = ymm0[3,1,0,0,7,5,4,4]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float> %a0, i8 7) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -580,10 +891,17 @@ declare <8 x float> @llvm.x86.avx.vpermi
 
 
 define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; CHECK-LABEL: test_x86_avx_vperm2f128_pd_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vperm2f128_pd_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vperm2f128 $33, %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe3,0x75,0x06,0xc0,0x21]
+; AVX-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vperm2f128_pd_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vperm2f128 $33, %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x75,0x06,0xc0,0x21]
+; AVX512VL-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 3) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -591,10 +909,17 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 
 define <8 x float> @test_x86_avx_vperm2f128_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; CHECK-LABEL: test_x86_avx_vperm2f128_ps_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vperm2f128_ps_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vperm2f128 $33, %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe3,0x75,0x06,0xc0,0x21]
+; AVX-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vperm2f128_ps_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vperm2f128 $33, %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x75,0x06,0xc0,0x21]
+; AVX512VL-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 3) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -602,10 +927,17 @@ declare <8 x float> @llvm.x86.avx.vperm2
 
 
 define <8 x i32> @test_x86_avx_vperm2f128_si_256(<8 x i32> %a0, <8 x i32> %a1) {
-; CHECK-LABEL: test_x86_avx_vperm2f128_si_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_vperm2f128_si_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vperm2f128 $33, %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe3,0x75,0x06,0xc0,0x21]
+; AVX-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_vperm2f128_si_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vperm2i128 $33, %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x75,0x46,0xc0,0x21]
+; AVX512VL-NEXT:    # ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 3) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -613,10 +945,15 @@ declare <8 x i32> @llvm.x86.avx.vperm2f1
 
 
 define <8 x float> @test_x86_avx_cvtdq2_ps_256(<8 x i32> %a0) {
-; CHECK-LABEL: test_x86_avx_cvtdq2_ps_256:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test_x86_avx_cvtdq2_ps_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtdq2ps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5b,0xc0]
+; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_x86_avx_cvtdq2_ps_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvtdq2ps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32> %a0) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=333834&r1=333833&r2=333834&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Sat Jun  2 14:35:48 2018
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=X86 --check-prefix=X86-AVX
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X86 --check-prefix=X86-AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=X64 --check-prefix=X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+pclmul,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+pclmul,+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX512VL,X86-AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+pclmul,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+pclmul,+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX512VL,X64-AVX512VL
 
 define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_addsub_pd_256:

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll?rev=333834&r1=333833&r2=333834&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll Sat Jun  2 14:35:48 2018
@@ -1,22 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX512VL
 
 define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
 ; AVX-LABEL: test_x86_avx_vzeroall:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
 ; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vzeroall
+; AVX-NEXT:    # encoding: [0xc5,0xfd,0x11,0x44,0x24,0xc8]
+; AVX-NEXT:    vzeroall # encoding: [0xc5,0xfc,0x77]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    retq
+; AVX-NEXT:    # encoding: [0xc5,0xfc,0x10,0x44,0x24,0xc8]
+; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vzeroall:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16
-; AVX512VL-NEXT:    vzeroall
-; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0
-; AVX512VL-NEXT:    retq
+; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16 # encoding: [0x62,0xe1,0xfd,0x28,0x58,0xc1]
+; AVX512VL-NEXT:    vzeroall # encoding: [0xc5,0xfc,0x77]
+; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0 # encoding: [0x62,0xb1,0xfd,0x28,0x28,0xc0]
+; AVX512VL-NEXT:    retq # encoding: [0xc3]
   %c = fadd <4 x double> %a, %b
   call void @llvm.x86.avx.vzeroall()
   ret <4 x double> %c
@@ -26,18 +28,20 @@ declare void @llvm.x86.avx.vzeroall() no
 define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
 ; AVX-LABEL: test_x86_avx_vzeroupper:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
 ; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    # encoding: [0xc5,0xfd,0x11,0x44,0x24,0xc8]
+; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    retq
+; AVX-NEXT:    # encoding: [0xc5,0xfc,0x10,0x44,0x24,0xc8]
+; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vzeroupper:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16
-; AVX512VL-NEXT:    vzeroupper
-; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0
-; AVX512VL-NEXT:    retq
+; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16 # encoding: [0x62,0xe1,0xfd,0x28,0x58,0xc1]
+; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0 # encoding: [0x62,0xb1,0xfd,0x28,0x28,0xc0]
+; AVX512VL-NEXT:    retq # encoding: [0xc3]
   %c = fadd <4 x double> %a, %b
   call void @llvm.x86.avx.vzeroupper()
   ret <4 x double> %c



