[llvm] r302123 - [AVX-512VL] Autogenerate checks. Add --show-mc-encoding to check instruction predicate.

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Wed May 3 23:53:32 PDT 2017


Author: ibreger
Date: Thu May  4 01:53:31 2017
New Revision: 302123

URL: http://llvm.org/viewvc/llvm-project?rev=302123&view=rev
Log:
[AVX-512VL] Autogenerate checks. Add --show-mc-encoding to check instruction predicate.

Modified:
    llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll   (contents, props changed)

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll?rev=302123&r1=302122&r2=302123&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll Thu May  4 01:53:31 2017
@@ -1,36 +1,42 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl| FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding| FileCheck %s
 
 ; 256-bit
 
-; CHECK-LABEL: vpaddq256_test
-; CHECK: vpaddq %ymm{{.*}}
-; CHECK: ret
 define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpaddq256_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <4 x i64> %i, %j
   ret <4 x i64> %x
 }
 
-; CHECK-LABEL: vpaddq256_fold_test
-; CHECK: vpaddq (%rdi), %ymm{{.*}}
-; CHECK: ret
 define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
+; CHECK-LABEL: vpaddq256_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load <4 x i64>, <4 x i64>* %j, align 4
   %x = add <4 x i64> %i, %tmp
   ret <4 x i64> %x
 }
 
-; CHECK-LABEL: vpaddq256_broadcast_test
-; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}}
-; CHECK: ret
 define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
+; CHECK-LABEL: vpaddq256_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <4 x i64> %i, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %x
 }
 
-; CHECK-LABEL: vpaddq256_broadcast2_test
-; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
-; CHECK: ret
 define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
+; CHECK-LABEL: vpaddq256_broadcast2_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %j = load i64, i64* %j.ptr
   %j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
   %j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -38,55 +44,68 @@ define <4 x i64> @vpaddq256_broadcast2_t
   ret <4 x i64> %x
 }
 
-; CHECK-LABEL: vpaddd256_test
-; CHECK: vpaddd %ymm{{.*}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpaddd256_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd256_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
+; CHECK-LABEL: vpaddd256_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load <8 x i32>, <8 x i32>* %j, align 4
   %x = add <8 x i32> %i, %tmp
   ret <8 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd256_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
+; CHECK-LABEL: vpaddd256_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   ret <8 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd256_mask_test
-; CHECK: vpaddd %ymm{{.*%k[1-7].*}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = add <8 x i32> %i, %j
   %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd256_maskz_test
-; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = add <8 x i32> %i, %j
   %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd256_mask_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %j = load <8 x i32>, <8 x i32>* %j.ptr
   %x = add <8 x i32> %i, %j
@@ -94,20 +113,27 @@ define <8 x i32> @vpaddd256_mask_fold_te
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd256_mask_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]}}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd256_maskz_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %j = load <8 x i32>, <8 x i32>* %j.ptr
   %x = add <8 x i32> %i, %j
@@ -115,96 +141,111 @@ define <8 x i32> @vpaddd256_maskz_fold_t
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd256_maskz_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
 define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
   ret <8 x i32> %r
 }
 
-; CHECK-LABEL: vpsubq256_test
-; CHECK: vpsubq %ymm{{.*}}
-; CHECK: ret
 define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpsubq256_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = sub <4 x i64> %i, %j
   ret <4 x i64> %x
 }
 
-; CHECK-LABEL: vpsubd256_test
-; CHECK: vpsubd %ymm{{.*}}
-; CHECK: ret
 define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpsubd256_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = sub <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
-; CHECK-LABEL: vpmulld256_test
-; CHECK: vpmulld %ymm{{.*}}
-; CHECK: ret
 define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
+; CHECK-LABEL: vpmulld256_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpmulld %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x40,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = mul <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
-; CHECK-LABEL: test_vaddpd_256
-; CHECK: vaddpd{{.*}}
-; CHECK: ret
 define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) {
+; CHECK-LABEL: test_vaddpd_256:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %add.i = fadd <4 x double> %x, %y
   ret <4 x double> %add.i
 }
 
-; CHECK-LABEL: test_fold_vaddpd_256
-; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
-; CHECK: ret
 define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) {
+; CHECK-LABEL: test_fold_vaddpd_256:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00>
   ret <4 x double> %add.i
 }
 
-; CHECK-LABEL: test_broadcast_vaddpd_256
-; CHECK: LCP{{.*}}(%rip){1to8}, %ymm0, %ymm0
-; CHECK: ret
 define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
+; CHECK-LABEL: test_broadcast_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
   ret <8 x float> %b
 }
 
-; CHECK-LABEL: test_mask_vaddps_256
-; CHECK: vaddps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = fadd <8 x float> %i, %j
   %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmulps_256
-; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = fmul <8 x float> %i, %j
   %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vminps_256
-; CHECK: vminps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %cmp_res = fcmp olt <8 x float> %i, %j
   %min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
@@ -212,12 +253,13 @@ define <8 x float> @test_mask_vminps_256
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmaxps_256
-; CHECK: vmaxps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %cmp_res = fcmp ogt <8 x float> %i, %j
   %max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
@@ -225,48 +267,52 @@ define <8 x float> @test_mask_vmaxps_256
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vsubps_256
-; CHECK: vsubps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = fsub <8 x float> %i, %j
   %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vdivps_256
-; CHECK: vdivps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i,
-                                        <8 x float> %j, <8 x i32> %mask1)
-                                        nounwind readnone {
+define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
   %x = fdiv <8 x float> %i, %j
   %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
   ret <8 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmulpd_256
-; CHECK: vmulpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i,
-                                        <4 x double> %j, <4 x i64> %mask1)
-                                        nounwind readnone {
+define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %x = fmul <4 x double> %i, %j
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vminpd_256
-; CHECK: vminpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i,
-                                        <4 x double> %j, <4 x i64> %mask1)
-                                        nounwind readnone {
+define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %cmp_res = fcmp olt <4 x double> %i, %j
   %min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
@@ -274,12 +320,13 @@ define <4 x double> @test_mask_vminpd_25
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vmaxpd_256
-; CHECK: vmaxpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i,
-                                        <4 x double> %j, <4 x i64> %mask1)
-                                        nounwind readnone {
+define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %cmp_res = fcmp ogt <4 x double> %i, %j
   %max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
@@ -287,59 +334,65 @@ define <4 x double> @test_mask_vmaxpd_25
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vsubpd_256
-; CHECK: vsubpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i,
-                                        <4 x double> %j, <4 x i64> %mask1)
-                                        nounwind readnone {
+define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %x = fsub <4 x double> %i, %j
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vdivpd_256
-; CHECK: vdivpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i,
-                                        <4 x double> %j, <4 x i64> %mask1)
-                                        nounwind readnone {
+define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %x = fdiv <4 x double> %i, %j
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vaddpd_256
-; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i,
-                                         <4 x double> %j, <4 x i64> %mask1)
-                                         nounwind readnone {
+define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %x = fadd <4 x double> %i, %j
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_vaddpd_256
-; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}}}
-; CHECK: ret
-define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j,
-                                          <4 x i64> %mask1) nounwind readnone {
+define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_maskz_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %x = fadd <4 x double> %i, %j
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_mask_fold_vaddpd_256
-; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}.*}}
-; CHECK: ret
-define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i,
-                                         <4 x double>* %j,  <4 x i64> %mask1)
-                                         nounwind {
+define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j,  <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_fold_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %tmp = load <4 x double>, <4 x double>* %j
   %x = fadd <4 x double> %i, %tmp
@@ -347,11 +400,13 @@ define <4 x double> @test_mask_fold_vadd
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_fold_vaddpd_256
-; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}.*}}
-; CHECK: ret
-define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j,
-                                          <4 x i64> %mask1) nounwind {
+define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_fold_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %tmp = load <4 x double>, <4 x double>* %j
   %x = fadd <4 x double> %i, %tmp
@@ -359,43 +414,46 @@ define <4 x double> @test_maskz_fold_vad
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_broadcast2_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}}
-; CHECK: ret
 define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
+; CHECK-LABEL: test_broadcast2_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load double, double* %j
   %b = insertelement <4 x double> undef, double %tmp, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef,
-                     <4 x i32> zeroinitializer
+  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
   %x = fadd <4 x double> %c, %i
   ret <4 x double> %x
 }
 
-; CHECK-LABEL: test_mask_broadcast_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]}.*}}
-; CHECK: ret
-define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i,
-                                          double* %j, <4 x i64> %mask1) nounwind {
+define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_broadcast_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm0, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc0]
+; CHECK-NEXT:    vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04]
+; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f]
+; CHECK-NEXT:    vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %tmp = load double, double* %j
   %b = insertelement <4 x double> undef, double %tmp, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef,
-                     <4 x i32> zeroinitializer
+  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
   %x = fadd <4 x double> %c, %i
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_broadcast_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
-define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j,
-                                           <4 x i64> %mask1) nounwind {
+define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i64> %mask1, zeroinitializer
   %tmp = load double, double* %j
   %b = insertelement <4 x double> undef, double %tmp, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef,
-                     <4 x i32> zeroinitializer
+  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
   %x = fadd <4 x double> %c, %i
   %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
   ret <4 x double> %r
@@ -403,27 +461,30 @@ define <4 x double> @test_maskz_broadcas
 
 ; 128-bit
 
-; CHECK-LABEL: vpaddq128_test
-; CHECK: vpaddq %xmm{{.*}}
-; CHECK: ret
 define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpaddq128_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <2 x i64> %i, %j
   ret <2 x i64> %x
 }
 
-; CHECK-LABEL: vpaddq128_fold_test
-; CHECK: vpaddq (%rdi), %xmm{{.*}}
-; CHECK: ret
 define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
+; CHECK-LABEL: vpaddq128_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load <2 x i64>, <2 x i64>* %j, align 4
   %x = add <2 x i64> %i, %tmp
   ret <2 x i64> %x
 }
 
-; CHECK-LABEL: vpaddq128_broadcast2_test
-; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}}
-; CHECK: ret
 define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
+; CHECK-LABEL: vpaddq128_broadcast2_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load i64, i64* %j
   %j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
   %j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
@@ -431,55 +492,68 @@ define <2 x i64> @vpaddq128_broadcast2_t
   ret <2 x i64> %x
 }
 
-; CHECK-LABEL: vpaddd128_test
-; CHECK: vpaddd %xmm{{.*}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpaddd128_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <4 x i32> %i, %j
   ret <4 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd128_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
+; CHECK-LABEL: vpaddd128_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load <4 x i32>, <4 x i32>* %j, align 4
   %x = add <4 x i32> %i, %tmp
   ret <4 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd128_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
+; CHECK-LABEL: vpaddd128_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %x
 }
 
-; CHECK-LABEL: vpaddd128_mask_test
-; CHECK: vpaddd %xmm{{.*%k[1-7].*}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = add <4 x i32> %i, %j
   %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd128_maskz_test
-; CHECK: vpaddd %xmm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = add <4 x i32> %i, %j
   %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd128_mask_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*%k[1-7]}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %j = load <4 x i32>, <4 x i32>* %j.ptr
   %x = add <4 x i32> %i, %j
@@ -487,20 +561,27 @@ define <4 x i32> @vpaddd128_mask_fold_te
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd128_mask_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]}}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI46_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
   %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd128_maskz_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_fold_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %j = load <4 x i32>, <4 x i32>* %j.ptr
   %x = add <4 x i32> %i, %j
@@ -508,96 +589,111 @@ define <4 x i32> @vpaddd128_maskz_fold_t
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpaddd128_maskz_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
 define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_broadcast_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI48_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
   %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
   ret <4 x i32> %r
 }
 
-; CHECK-LABEL: vpsubq128_test
-; CHECK: vpsubq %xmm{{.*}}
-; CHECK: ret
 define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpsubq128_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = sub <2 x i64> %i, %j
   ret <2 x i64> %x
 }
 
-; CHECK-LABEL: vpsubd128_test
-; CHECK: vpsubd %xmm{{.*}}
-; CHECK: ret
 define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpsubd128_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = sub <4 x i32> %i, %j
   ret <4 x i32> %x
 }
 
-; CHECK-LABEL: vpmulld128_test
-; CHECK: vpmulld %xmm{{.*}}
-; CHECK: ret
 define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
+; CHECK-LABEL: vpmulld128_test:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %x = mul <4 x i32> %i, %j
   ret <4 x i32> %x
 }
 
-; CHECK-LABEL: test_vaddpd_128
-; CHECK: vaddpd{{.*}}
-; CHECK: ret
 define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) {
+; CHECK-LABEL: test_vaddpd_128:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %add.i = fadd <2 x double> %x, %y
   ret <2 x double> %add.i
 }
 
-; CHECK-LABEL: test_fold_vaddpd_128
-; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
-; CHECK: ret
 define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) {
+; CHECK-LABEL: test_fold_vaddpd_128:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00>
   ret <2 x double> %add.i
 }
 
-; CHECK-LABEL: test_broadcast_vaddpd_128
-; CHECK: LCP{{.*}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK: ret
 define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind {
+; CHECK-LABEL: test_broadcast_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A]
+; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
   ret <4 x float> %b
 }
 
-; CHECK-LABEL: test_mask_vaddps_128
-; CHECK: vaddps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = fadd <4 x float> %i, %j
   %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
   ret <4 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmulps_128
-; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = fmul <4 x float> %i, %j
   %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
   ret <4 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vminps_128
-; CHECK: vminps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %cmp_res = fcmp olt <4 x float> %i, %j
   %min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
@@ -605,12 +701,13 @@ define <4 x float> @test_mask_vminps_128
   ret <4 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmaxps_128
-; CHECK: vmaxps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %cmp_res = fcmp ogt <4 x float> %i, %j
   %max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
@@ -618,12 +715,13 @@ define <4 x float> @test_mask_vmaxps_128
   ret <4 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vsubps_128
-; CHECK: vsubps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = fsub <4 x float> %i, %j
   %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
@@ -631,36 +729,39 @@ define <4 x float> @test_mask_vsubps_128
 }
 
 
-; CHECK-LABEL: test_mask_vdivps_128
-; CHECK: vdivps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i,
-                                        <4 x float> %j, <4 x i32> %mask1)
-                                        nounwind readnone {
+define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivps_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <4 x i32> %mask1, zeroinitializer
   %x = fdiv <4 x float> %i, %j
   %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
   ret <4 x float> %r
 }
 
-; CHECK-LABEL: test_mask_vmulpd_128
-; CHECK: vmulpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i,
-                                        <2 x double> %j, <2 x i64> %mask1)
-                                        nounwind readnone {
+define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %x = fmul <2 x double> %i, %j
   %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vminpd_128
-; CHECK: vminpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i,
-                                        <2 x double> %j, <2 x i64> %mask1)
-                                        nounwind readnone {
+define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %cmp_res = fcmp olt <2 x double> %i, %j
   %min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
@@ -668,12 +769,13 @@ define <2 x double> @test_mask_vminpd_12
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vmaxpd_128
-; CHECK: vmaxpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i,
-                                        <2 x double> %j, <2 x i64> %mask1)
-                                        nounwind readnone {
+define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %cmp_res = fcmp ogt <2 x double> %i, %j
   %max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
@@ -681,46 +783,52 @@ define <2 x double> @test_mask_vmaxpd_12
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vsubpd_128
-; CHECK: vsubpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i,
-                                        <2 x double> %j, <2 x i64> %mask1)
-                                        nounwind readnone {
+define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %x = fsub <2 x double> %i, %j
   %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vdivpd_128
-; CHECK: vdivpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i,
-                                        <2 x double> %j, <2 x i64> %mask1)
-                                        nounwind readnone {
+define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %x = fdiv <2 x double> %i, %j
   %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_vaddpd_128
-; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i,
-                                         <2 x double> %j, <2 x i64> %mask1)
-                                         nounwind readnone {
+define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT:    vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %x = fadd <2 x double> %i, %j
   %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_vaddpd_128
-; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}}}
-; CHECK: ret
 define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
+; CHECK-LABEL: test_maskz_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
                                           <2 x i64> %mask1) nounwind readnone {
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %x = fadd <2 x double> %i, %j
@@ -728,12 +836,13 @@ define <2 x double> @test_maskz_vaddpd_1
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_mask_fold_vaddpd_128
-; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}.*}}
-; CHECK: ret
-define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i,
-                                         <2 x double>* %j,  <2 x i64> %mask1)
-                                         nounwind {
+define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j,  <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_fold_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT:    vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT:    vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %tmp = load <2 x double>, <2 x double>* %j
   %x = fadd <2 x double> %i, %tmp
@@ -741,11 +850,13 @@ define <2 x double> @test_mask_fold_vadd
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_fold_vaddpd_128
-; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}.*}}
-; CHECK: ret
-define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j,
-                                          <2 x i64> %mask1) nounwind {
+define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_fold_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %tmp = load <2 x double>, <2 x double>* %j
   %x = fadd <2 x double> %i, %tmp
@@ -753,10 +864,11 @@ define <2 x double> @test_maskz_fold_vad
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_broadcast2_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}}
-; CHECK: ret
 define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
+; CHECK-LABEL: test_broadcast2_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %tmp = load double, double* %j
   %j.0 = insertelement <2 x double> undef, double %tmp, i64 0
   %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
@@ -764,12 +876,14 @@ define <2 x double> @test_broadcast2_vad
   ret <2 x double> %x
 }
 
-; CHECK-LABEL: test_mask_broadcast_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]}.*}}
-; CHECK: ret
-define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i,
-                                          double* %j, <2 x i64> %mask1)
-                                          nounwind {
+define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_broadcast_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
+; CHECK-NEXT:    vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04]
+; CHECK-NEXT:    vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f]
+; CHECK-NEXT:    vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %tmp = load double, double* %j
   %j.0 = insertelement <2 x double> undef, double %tmp, i64 0
@@ -779,11 +893,13 @@ define <2 x double> @test_mask_broadcast
   ret <2 x double> %r
 }
 
-; CHECK-LABEL: test_maskz_broadcast_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
-define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j,
-                                           <2 x i64> %mask1) nounwind {
+define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT:    vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT:    vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask = icmp ne <2 x i64> %mask1, zeroinitializer
   %tmp = load double, double* %j
   %j.0 = insertelement <2 x double> undef, double %tmp, i64 0

Propchange: llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
------------------------------------------------------------------------------
    svn:executable = *




More information about the llvm-commits mailing list