[llvm] r268884 - [AVX512] Add VLX 128/256-bit SET0 operations that encode to 128/256-bit EVEX-encoded VPXORD so all 32 registers can be used.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun May 8 14:33:53 PDT 2016


Author: ctopper
Date: Sun May  8 16:33:53 2016
New Revision: 268884

URL: http://llvm.org/viewvc/llvm-project?rev=268884&view=rev
Log:
[AVX512] Add VLX 128/256-bit SET0 operations that encode to 128/256-bit EVEX-encoded VPXORD so all 32 registers can be used.
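
For illustration, here is a minimal sketch of the zero idiom this enables. The input below is hypothetical (it is not one of the test updates in this patch): with -mattr=+avx512vl, an all-zeros 128-bit vector is selected as the new AVX512_128_SET0 pseudo and expanded after register allocation to the EVEX-encoded VPXORD, which can address all 32 XMM registers instead of only %xmm0-%xmm15.

    ; zero128.ll (hypothetical); try:
    ;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl zero128.ll
    define <4 x i32> @zero_v4i32() {
      ; Matched as AVX512_128_SET0 and expanded post-RA to the
      ; EVEX-encoded "vpxord %xmm0, %xmm0, %xmm0".
      ret <4 x i32> zeroinitializer
    }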

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/avx512-arith.ll
    llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll
    llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/fma_patterns.ll
    llvm/trunk/test/CodeGen/X86/masked_memop.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Sun May  8 16:33:53 2016
@@ -483,6 +483,14 @@ def AVX512_512_SET0 : I<0, Pseudo, (outs
                [(set VR512:$dst, (v16i32 immAllZerosV))]>;
 }
 
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+    isPseudo = 1, Predicates = [HasVLX] in {
+def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
+               [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
+def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
+               [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
+}
+
 //===----------------------------------------------------------------------===//
 // AVX-512 - VECTOR INSERT
 //

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Sun May  8 16:33:53 2016
@@ -5512,6 +5512,10 @@ bool X86InstrInfo::expandPostRAPseudo(Ma
   case X86::AVX_SET0:
     assert(HasAVX && "AVX not supported");
     return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
+  case X86::AVX512_128_SET0:
+    return Expand2AddrUndef(MIB, get(X86::VPXORDZ128rr));
+  case X86::AVX512_256_SET0:
+    return Expand2AddrUndef(MIB, get(X86::VPXORDZ256rr));
   case X86::AVX512_512_SET0:
     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
   case X86::V_SETALLONES:

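Both new cases reuse the same expansion helper as the existing 512-bit pseudo: Expand2AddrUndef rewrites the pseudo in place to the given VPXORD opcode, with the destination register repeated as its two (undef) source operands, so no scratch register is needed. A hedged sketch of the 256-bit case, using a hypothetical input that mirrors the SKX checks in the test updates below:

    ; zero256.ll (hypothetical); try:
    ;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl zero256.ll
    define <8 x i32> @zero_v8i32() {
      ; Matched as AVX512_256_SET0 and expanded post-RA to
      ; "vpxord %ymm0, %ymm0, %ymm0"; since it is EVEX encoded, it
      ; could equally target %ymm16-%ymm31.
      ret <8 x i32> zeroinitializer
    }
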
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sun May  8 16:33:53 2016
@@ -472,11 +472,12 @@ let isReMaterializable = 1, isAsCheapAsA
 // We set canFoldAsLoad because this can be converted to a constant-pool
 // load of an all-zeros value if folding it would be beneficial.
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
-    isPseudo = 1, SchedRW = [WriteZero] in {
+    isPseudo = 1, Predicates = [NoVLX], SchedRW = [WriteZero] in {
 def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                [(set VR128:$dst, (v4f32 immAllZerosV))]>;
 }
 
+let Predicates = [NoVLX] in
 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
 
 
@@ -485,7 +486,7 @@ def : Pat<(v4i32 immAllZerosV), (V_SET0)
 // at the rename stage without using any execution unit, so SET0PSY
 // and SET0PDY can be used for vector int instructions without penalty
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
-    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
+    isPseudo = 1, Predicates = [HasAVX, NoVLX], SchedRW = [WriteZero] in {
 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                  [(set VR256:$dst, (v8i32 immAllZerosV))]>;
 }

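With the NoVLX predicates added here, targets without AVX512VL keep the previous behavior: V_SET0 and AVX_SET0 remain the matching zero pseudos and still expand to the VEX-encoded VXORPS, as the KNL checks in the test updates below show. A minimal sketch under that assumption, again with a hypothetical input:

    ; zero_novlx.ll (hypothetical); try:
    ;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f zero_novlx.ll
    define <4 x i32> @zero_v4i32_novlx() {
      ; No VLX: matched by the NoVLX-predicated V_SET0 and expanded to
      ; the VEX-encoded "vxorps %xmm0, %xmm0, %xmm0" (%xmm0-%xmm15 only).
      ret <4 x i32> zeroinitializer
    }
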
Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Sun May  8 16:33:53 2016
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck --check-prefix=CHECK --check-prefix=AVX512F %s
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl | FileCheck --check-prefix=CHECK --check-prefix=AVX512VL %s
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bw | FileCheck --check-prefix=CHECK --check-prefix=AVX512BW %s
@@ -682,7 +682,7 @@ define <8 x double> @test_mask_vminpd(<8
 ;
 ; AVX512VL-LABEL: test_mask_vminpd:
 ; AVX512VL:       ## BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -703,7 +703,7 @@ define <8 x double> @test_mask_vminpd(<8
 ;
 ; SKX-LABEL: test_mask_vminpd:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -742,7 +742,7 @@ define <8 x double> @test_mask_vmaxpd(<8
 ;
 ; AVX512VL-LABEL: test_mask_vmaxpd:
 ; AVX512VL:       ## BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vmaxpd(<8
 ;
 ; SKX-LABEL: test_mask_vmaxpd:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll Sun May  8 16:33:53 2016
@@ -1,13 +1,18 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=ALL_X64 --check-prefix=KNL
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL_X64 --check-prefix=SKX
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL_X32
 
 define <16 x i1> @test1() {
-; ALL_X64-LABEL: test1:
-; ALL_X64:       ## BB#0:
-; ALL_X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; ALL_X64-NEXT:    retq
+; KNL-LABEL: test1:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test1:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpxord %xmm0, %xmm0, %xmm0
+; SKX-NEXT:    retq
 ;
 ; KNL_X32-LABEL: test1:
 ; KNL_X32:       ## BB#0:

Modified: llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll Sun May  8 16:33:53 2016
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=SKX
 
@@ -127,7 +127,7 @@ define <4 x float> @test7(<4 x float> %a
 ;
 ; SKX-LABEL: test7:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vcmpltps %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovaps %xmm0, %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -148,7 +148,7 @@ define <2 x double> @test8(<2 x double>
 ;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vcmpltpd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovapd %xmm0, %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -969,7 +969,7 @@ define <4 x i32> @test44(<4 x i16> %x, <
 ;
 ; SKX-LABEL: test44:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
 ; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
@@ -992,7 +992,7 @@ define <2 x i64> @test45(<2 x i16> %x, <
 ;
 ; SKX-LABEL: test45:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
 ; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k1

Modified: llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll Sun May  8 16:33:53 2016
@@ -89,7 +89,7 @@ define <16 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xca]
-; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpxord %xmm4, %xmm4, %xmm4 ## encoding: [0x62,0xf1,0x5d,0x08,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %xmm4, %xmm3, %xmm0 ## encoding: [0x62,0xf1,0x65,0x08,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfc,0xc1]
@@ -111,7 +111,7 @@ define <32 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7d,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxord %ymm4, %ymm4, %ymm4 ## encoding: [0x62,0xf1,0x5d,0x28,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm4, %ymm3, %ymm0 ## encoding: [0x62,0xf1,0x65,0x28,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xfc,0xc1]
@@ -133,7 +133,7 @@ define <16 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xca]
-; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpxord %xmm4, %xmm4, %xmm4 ## encoding: [0x62,0xf1,0x5d,0x08,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %xmm4, %xmm3, %xmm0 ## encoding: [0x62,0xf1,0x65,0x08,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfc,0xc1]
@@ -155,7 +155,7 @@ define <32 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7d,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxord %ymm4, %ymm4, %ymm4 ## encoding: [0x62,0xf1,0x5d,0x28,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm4, %ymm3, %ymm0 ## encoding: [0x62,0xf1,0x65,0x28,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xfc,0xc1]

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Sun May  8 16:33:53 2016
@@ -7929,7 +7929,7 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK: kmovw  %edi, %k1
 ; CHECK: vmovaps  %zmm0, %zmm3
 ; CHECK: vfixupimmpd  $5, %xmm2, %xmm1, %xmm3 {%k1} 
-; CHECK: vpxor  %xmm4, %xmm4, %xmm4
+; CHECK: vpxord  %xmm4, %xmm4, %xmm4
 ; CHECK: vfixupimmpd  $4, %xmm2, %xmm1, %xmm4 {%k1} {z} 
 ; CHECK: vfixupimmpd  $3, %xmm2, %xmm1, %xmm0 
 ; CHECK: vaddpd  %xmm4, %xmm3, %xmm1
@@ -7950,7 +7950,7 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK: kmovw  %edi, %k1
 ; CHECK: vmovaps  %zmm0, %zmm3
 ; CHECK: vfixupimmpd  $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
-; CHECK: vpxor  %xmm2, %xmm2, %xmm2
+; CHECK: vpxord  %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmpd  $3, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK: vaddpd  %xmm0, %xmm3, %xmm0
   %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4)
@@ -7968,7 +7968,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK: kmovw  %edi, %k1
 ; CHECK: vmovaps  %zmm0, %zmm3
 ; CHECK: vfixupimmpd  $4, %ymm2, %ymm1, %ymm3 {%k1} 
-; CHECK: vpxor  %ymm4, %ymm4, %ymm4
+; CHECK: vpxord  %ymm4, %ymm4, %ymm4
 ; CHECK: vfixupimmpd  $5, %ymm2, %ymm1, %ymm4 {%k1} {z} 
 ; CHECK: vfixupimmpd  $3, %ymm2, %ymm1, %ymm0 
 ; CHECK: vaddpd  %ymm4, %ymm3, %ymm1
@@ -7989,7 +7989,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK: kmovw  %edi, %k1
 ; CHECK: vmovaps  %zmm0, %zmm3
 ; CHECK: vfixupimmpd  $5, %ymm2, %ymm1, %ymm3 {%k1} {z}
-; CHECK: vpxor  %ymm4, %ymm4, %ymm4
+; CHECK: vpxord  %ymm4, %ymm4, %ymm4
 ; CHECK: vmovaps  %zmm0, %zmm5
 ; CHECK: vfixupimmpd  $4, %ymm4, %ymm1, %ymm5 {%k1} {z}
 ; CHECK: vfixupimmpd  $3, %ymm2, %ymm1, %ymm0 
@@ -8013,7 +8013,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm3 {%k1} 
 ; CHECK: vmovaps  %zmm0, %zmm4
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm4 
-; CHECK: vpxor  %xmm2, %xmm2, %xmm2
+; CHECK: vpxord  %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm0 {%k1} 
 ; CHECK: vaddps  %xmm0, %xmm3, %xmm0
 ; CHECK: vaddps  %xmm4, %xmm0, %xmm0
@@ -8035,7 +8035,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm3 {%k1} {z} 
 ; CHECK: vmovaps  %zmm0, %zmm4
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm4 
-; CHECK: vpxor  %xmm2, %xmm2, %xmm2
+; CHECK: vpxord  %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmps  $5, %xmm2, %xmm1, %xmm0 {%k1} {z} 
 ; CHECK: vaddps  %xmm0, %xmm3, %xmm0
 ; CHECK: vaddps  %xmm4, %xmm0, %xmm0
@@ -8057,7 +8057,7 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm3 {%k1} 
 ; CHECK: vmovaps  %zmm0, %zmm4
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm4 
-; CHECK: vpxor  %ymm2, %ymm2, %ymm2
+; CHECK: vpxord  %ymm2, %ymm2, %ymm2
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm0 {%k1} 
 ; CHECK: vaddps  %ymm0, %ymm3, %ymm0
 ; CHECK: vaddps  %ymm4, %ymm0, %ymm0
@@ -8079,7 +8079,7 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm3 {%k1} {z} 
 ; CHECK: vmovaps  %zmm0, %zmm4
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm4 
-; CHECK: vpxor  %ymm2, %ymm2, %ymm2
+; CHECK: vpxord  %ymm2, %ymm2, %ymm2
 ; CHECK: vfixupimmps  $5, %ymm2, %ymm1, %ymm0 {%k1} {z} 
 ; CHECK: vaddps  %ymm0, %ymm3, %ymm0
 ; CHECK: vaddps  %ymm4, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll Sun May  8 16:33:53 2016
@@ -73,7 +73,7 @@ define   <8 x float> @_inreg8xfloat(floa
 define   <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; CHECK-NEXT:    vpxord %ymm3, %ymm3, %ymm3
 ; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -87,7 +87,7 @@ define   <8 x float> @_ss8xfloat_mask(<8
 define   <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -111,7 +111,7 @@ define   <4 x float> @_inreg4xfloat(floa
 define   <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xfloat_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpxord %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -125,7 +125,7 @@ define   <4 x float> @_ss4xfloat_mask(<4
 define   <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xfloat_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -149,7 +149,7 @@ define   <4 x double> @_inreg4xdouble(do
 define   <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xdouble_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpxord %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    vbroadcastsd %xmm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -163,7 +163,7 @@ define   <4 x double> @_ss4xdouble_mask(
 define   <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xdouble_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1
 ; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fma_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns.ll Sun May  8 16:33:53 2016
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
@@ -1150,7 +1151,7 @@ define <4 x float> @test_v4f32_fneg_fmul
 ;
 ; AVX512-LABEL: test_v4f32_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <4 x float> %x, %y
@@ -1173,7 +1174,7 @@ define <4 x double> @test_v4f64_fneg_fmu
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <4 x double> %x, %y

Modified: llvm/trunk/test/CodeGen/X86/masked_memop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_memop.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_memop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_memop.ll Sun May  8 16:33:53 2016
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx < %s | FileCheck %s --check-prefix=AVX  --check-prefix=AVX1
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
@@ -204,7 +204,7 @@ define <8 x double> @test5(<8 x i32> %tr
 ;
 ; SKX-LABEL: test5:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -233,7 +233,7 @@ define <2 x double> @test6(<2 x i64> %tr
 ;
 ; SKX-LABEL: test6:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -262,7 +262,7 @@ define <4 x float> @test7(<4 x i32> %tri
 ;
 ; SKX-LABEL: test7:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovups (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -299,7 +299,7 @@ define <4 x i32> @test8(<4 x i32> %trigg
 ;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -333,7 +333,7 @@ define void @test9(<4 x i32> %trigger, <
 ;
 ; SKX-LABEL: test9:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 %xmm1, (%rdi) {%k1}
 ; SKX-NEXT:    retq
@@ -375,7 +375,7 @@ define <4 x double> @test10(<4 x i32> %t
 ;
 ; SKX-LABEL: test10:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovapd (%rdi), %ymm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -415,7 +415,7 @@ define <4 x double> @test10b(<4 x i32> %
 ;
 ; SKX-LABEL: test10b:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; SKX-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1
 ; SKX-NEXT:    vmovapd (%rdi), %ymm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -456,7 +456,7 @@ define <8 x float> @test11a(<8 x i32> %t
 ;
 ; SKX-LABEL: test11a:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovaps (%rdi), %ymm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -624,7 +624,7 @@ define void @test12(<8 x i32> %trigger,
 ;
 ; SKX-LABEL: test12:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovdqu32 %ymm1, (%rdi) {%k1}
 ; SKX-NEXT:    retq
@@ -704,7 +704,7 @@ define void @test14(<2 x i32> %trigger,
 ;
 ; SKX-LABEL: test14:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -752,7 +752,7 @@ define void @test15(<2 x i32> %trigger,
 ;
 ; SKX-LABEL: test15:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vpmovqd %xmm1, (%rdi) {%k1}
@@ -798,7 +798,7 @@ define <2 x float> @test16(<2 x i32> %tr
 ;
 ; SKX-LABEL: test16:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -853,7 +853,7 @@ define <2 x i32> @test17(<2 x i32> %trig
 ;
 ; SKX-LABEL: test17:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -900,7 +900,7 @@ define <2 x float> @test18(<2 x i32> %tr
 ;
 ; SKX-LABEL: test18:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; SKX-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -1317,12 +1317,12 @@ define <4 x i32> @load_one_mask_bit_set1
 define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
 ; AVX-LABEL: load_one_mask_bit_set2:
 ; AVX:       ## BB#0:
-; AVX-NEXT:    vinsertps $32, 8(%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set2:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vinsertps $32, 8(%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX512-NEXT:    retq
   %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>, <4 x float> %val)
   ret <4 x float> %res
@@ -1368,21 +1368,21 @@ define <4 x double> @load_one_mask_bit_s
 ; AVX-LABEL: load_one_mask_bit_set4:
 ; AVX:       ## BB#0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1  ## xmm1 = xmm1[0],mem[0]
+; AVX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: load_one_mask_bit_set4:
 ; AVX512F:       ## BB#0:
 ; AVX512F-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1  ## xmm1 = xmm1[0],mem[0]
+; AVX512F-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: load_one_mask_bit_set4:
 ; SKX:       ## BB#0:
 ; SKX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; SKX-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1  ## xmm1 = xmm1[0],mem[0]
+; SKX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; SKX-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 false, i1 true>, <4 x double> %val)

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll Sun May  8 16:33:53 2016
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
@@ -762,7 +763,7 @@ define <2 x i64> @shuffle_v2i64_z1(<2 x
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_z1:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 2, i32 1>
@@ -804,7 +805,7 @@ define <2 x double> @shuffle_v2f64_1z(<2
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_1z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -833,7 +834,7 @@ define <2 x double> @shuffle_v2f64_z0(<2
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_z0:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 0>
@@ -865,11 +866,23 @@ define <2 x double> @shuffle_v2f64_z1(<2
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: shuffle_v2f64_z1:
-; AVX:       # BB#0:
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; AVX-NEXT:    retq
+; AVX1-LABEL: shuffle_v2f64_z1:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: shuffle_v2f64_z1:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX2-NEXT:    retq
+;
+; AVX512VL-LABEL: shuffle_v2f64_z1:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
   ret <2 x double> %shuffle
 }
@@ -895,7 +908,7 @@ define <2 x double> @shuffle_v2f64_bitca
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_bitcast_1z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
 ; AVX512VL-NEXT:    retq
   %shuffle64 = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
@@ -947,7 +960,7 @@ define <2 x i64> @shuffle_v2i64_bitcast_
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_bitcast_z123:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX512VL-NEXT:    retq
   %bitcast32 = bitcast <2 x i64> %x to <4 x float>

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll Sun May  8 16:33:53 2016
@@ -192,7 +192,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ;
 ; AVX512CDVL-LABEL: testv2i64u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vplzcntq %xmm0, %xmm0
@@ -388,7 +388,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ;
 ; AVX512CDVL-LABEL: testv4i32:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX512CDVL-NEXT:    vpandd %xmm2, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubd {{.*}}(%rip){1to4}, %xmm0, %xmm0
@@ -611,7 +611,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ;
 ; AVX512CDVL-LABEL: testv4i32u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandd %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vplzcntd %xmm0, %xmm0
@@ -794,7 +794,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ;
 ; AVX512CDVL-LABEL: testv8i16:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -992,7 +992,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ;
 ; AVX512CDVL-LABEL: testv8i16u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -1168,7 +1168,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ;
 ; AVX512CDVL-LABEL: testv16i8:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
@@ -1334,7 +1334,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ;
 ; AVX512CDVL-LABEL: testv16i8u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=268884&r1=268883&r2=268884&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Sun May  8 16:33:53 2016
@@ -56,7 +56,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ;
 ; AVX512CDVL-LABEL: testv4i64:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpandq %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubq {{.*}}(%rip){1to4}, %ymm0, %ymm0
@@ -144,7 +144,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ;
 ; AVX512CDVL-LABEL: testv4i64u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntq %ymm0, %ymm0
@@ -229,7 +229,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ;
 ; AVX512CDVL-LABEL: testv8i32:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpandd %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubd {{.*}}(%rip){1to8}, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ;
 ; AVX512CDVL-LABEL: testv8i32u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandd %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntd %ymm0, %ymm0
@@ -415,7 +415,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ;
 ; AVX512CDVL-LABEL: testv16i16:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
@@ -511,7 +511,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ;
 ; AVX512CDVL-LABEL: testv16i16u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
@@ -598,7 +598,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ;
 ; AVX512CDVL-LABEL: testv32i8:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
@@ -679,7 +679,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ;
 ; AVX512CDVL-LABEL: testv32i8u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
