[llvm] r309926 - [X86] SET0 to use XMM registers where possible PR26018 PR32862

Dinar Temirbulatov via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 3 01:50:18 PDT 2017
Author: dinar
Date: Thu Aug  3 01:50:18 2017
New Revision: 309926

URL: http://llvm.org/viewvc/llvm-project?rev=309926&view=rev
Log:
[X86] SET0 to use XMM registers where possible PR26018 PR32862

Zeroing a YMM or ZMM register does not need a full-width XOR: any
VEX/EVEX-encoded write to an XMM register also clears the upper bits of the
containing YMM/ZMM register. Expand the AVX512_256_SET0 and AVX512_512_SET0
pseudos through the XMM sub-register whenever the destination has a narrow
encoding available (VEX requires an encoding value below 16; VLX adds a
128-bit EVEX XOR for the 256-bit case), and fall back to the 512-bit vpxord
otherwise. In the tests this turns, for example,
"vpxord %zmm1, %zmm1, %zmm1" into "vpxor %xmm1, %xmm1, %xmm1".

Differential Revision: https://reviews.llvm.org/D35965

Modified:
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/avx512-arith.ll
    llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
    llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
    llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll
    llvm/trunk/test/CodeGen/X86/avx512-mov.ll
    llvm/trunk/test/CodeGen/X86/avx512-select.ll
    llvm/trunk/test/CodeGen/X86/avx512-skx-insert-subvec.ll
    llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
    llvm/trunk/test/CodeGen/X86/avx512-vselect-crash.ll
    llvm/trunk/test/CodeGen/X86/avx512-vselect.ll
    llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
    llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll
    llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
    llvm/trunk/test/CodeGen/X86/compress_expand.ll
    llvm/trunk/test/CodeGen/X86/fma_patterns.ll
    llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll
    llvm/trunk/test/CodeGen/X86/madd.ll
    llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
    llvm/trunk/test/CodeGen/X86/masked_memop.ll
    llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll
    llvm/trunk/test/CodeGen/X86/nontemporal-2.ll
    llvm/trunk/test/CodeGen/X86/nontemporal-loads.ll
    llvm/trunk/test/CodeGen/X86/sad.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll
    llvm/trunk/test/CodeGen/X86/vector-popcnt-512.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v32.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v64.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
    llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Thu Aug  3 01:50:18 2017
@@ -7723,7 +7723,8 @@ bool X86InstrInfo::expandPostRAPseudo(Ma
       return Expand2AddrUndef(MIB,
                               get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
     // Extended register without VLX. Use a larger XOR.
-    SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
+    SrcReg =
+        TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
     MIB->getOperand(0).setReg(SrcReg);
     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
   }
@@ -7731,20 +7732,24 @@ bool X86InstrInfo::expandPostRAPseudo(Ma
     bool HasVLX = Subtarget.hasVLX();
     unsigned SrcReg = MIB->getOperand(0).getReg();
     const TargetRegisterInfo *TRI = &getRegisterInfo();
-    if (HasVLX)
-      return Expand2AddrUndef(MIB, get(X86::VPXORDZ256rr));
+    if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
+      unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
+      MIB->getOperand(0).setReg(XReg);
+      return Expand2AddrUndef(MIB,
+                              get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
+    }
+    return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
+  }
+  case X86::AVX512_512_SET0: {
+    const TargetRegisterInfo *TRI = &getRegisterInfo();
+    unsigned SrcReg = MIB->getOperand(0).getReg();
     if (TRI->getEncodingValue(SrcReg) < 16) {
       unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
       MIB->getOperand(0).setReg(XReg);
       return Expand2AddrUndef(MIB, get(X86::VXORPSrr));
     }
-    // Extended register without VLX. Use a larger XOR.
-    SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
-    MIB->getOperand(0).setReg(SrcReg);
     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
   }
-  case X86::AVX512_512_SET0:
-    return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
   case X86::V_SETALLONES:
     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
   case X86::AVX2_SETALLONES:
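
The hunk above is the entire functional change; the rest of the patch is test
updates. For reference, here is the selection logic it implements, reduced to
a standalone sketch. The function name pickSet0Idiom, the ZeroIdiom enum, and
the plain unsigned encoding parameter are illustrative stand-ins for the LLVM
opcode enums and TargetRegisterInfo queries, not the real API:

  #include <cstdio>

  // Stand-ins for the three zeroing idioms the expansion can emit.
  enum ZeroIdiom { VXORPSrr, VPXORDZ128rr, VPXORDZrr };

  // Mirrors the AVX512_256_SET0 / AVX512_512_SET0 cases above: zero through
  // the XMM sub-register when a narrow encoding exists (VEX requires an
  // encoding value below 16; VLX adds an EVEX 128-bit XOR), because a
  // VEX/EVEX write to xmmN implicitly clears the rest of ymmN/zmmN.
  // Otherwise fall back to the full 512-bit EVEX XOR.
  static ZeroIdiom pickSet0Idiom(bool Is256, bool HasVLX, unsigned Encoding) {
    if (Is256 && HasVLX)
      return VPXORDZ128rr;   // later shrunk by EVEX-to-VEX where legal
    if (Encoding < 16)
      return VXORPSrr;       // plain VEX xorps on the xmm sub-register
    return VPXORDZrr;        // ymm16-31/zmm16-31 without a narrow encoding
  }

  int main() {
    const char *Name[] = {"vxorps xmm", "vpxord xmm (EVEX128)", "vpxord zmm"};
    std::printf("256-bit, no VLX, ymm1 -> %s\n",
                Name[pickSet0Idiom(true, false, 1)]);
    std::printf("512-bit, zmm19 -> %s\n",
                Name[pickSet0Idiom(false, false, 19)]);
  }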

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Thu Aug  3 01:50:18 2017
@@ -984,7 +984,7 @@ define void @movnt_pd(i8* %p, <4 x doubl
 ; AVX512VL-LABEL: movnt_pd:
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0x57,0xc9]
+; AVX512VL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
 ; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
 ; AVX512VL-NEXT:    vmovntpd %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00]
 ; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
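
The encoding comments make the shrink auditable: for an instruction with a
two-byte VEX prefix, the ymm and xmm forms differ only in the VEX.L bit of
the second prefix byte. A minimal sketch that reproduces the bytes quoted
above; the helper and bit layout are written out by hand from the VEX format,
not taken from an LLVM API:

  #include <cstdio>

  // Second byte of a 2-byte VEX prefix: ~R (bit 7), ~vvvv (bits 6:3),
  // L (bit 2), pp (bits 1:0). For vxorpd with source register 1, as in the
  // hunk above, register number 1 goes into vvvv inverted, and pp = 01
  // (the implied 0x66 prefix).
  static unsigned vexByte2(unsigned VvvvReg, bool L256) {
    unsigned InvR = 1u << 7;                 // no modrm.reg extension here
    unsigned InvVvvv = (~VvvvReg & 0xFu) << 3;
    unsigned PP = 0x1;
    return InvR | InvVvvv | (L256 ? 1u << 2 : 0u) | PP;
  }

  int main() {
    // Prints 0xf5 and 0xf1, matching the old and new encodings above.
    std::printf("ymm1: 0x%02x  xmm1: 0x%02x\n",
                vexByte2(1, true), vexByte2(1, false));
  }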

Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Thu Aug  3 01:50:18 2017
@@ -1135,99 +1135,52 @@ eintry:
 }
 
 define void @isel_crash_32b(i8* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_32b:
-; X32-AVX2:       ## BB#0: ## %eintry
-; X32-AVX2-NEXT:    pushl %ebp
-; X32-AVX2-NEXT:  Lcfi1:
-; X32-AVX2-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT:  Lcfi2:
-; X32-AVX2-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX2-NEXT:    movl %esp, %ebp
-; X32-AVX2-NEXT:  Lcfi3:
-; X32-AVX2-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT:    andl $-32, %esp
-; X32-AVX2-NEXT:    subl $128, %esp
-; X32-AVX2-NEXT:    movl 8(%ebp), %eax
-; X32-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT:    vpbroadcastb (%eax), %ymm1
-; X32-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    movl %ebp, %esp
-; X32-AVX2-NEXT:    popl %ebp
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: isel_crash_32b:
-; X64-AVX2:       ## BB#0: ## %eintry
-; X64-AVX2-NEXT:    pushq %rbp
-; X64-AVX2-NEXT:  Lcfi0:
-; X64-AVX2-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX2-NEXT:  Lcfi1:
-; X64-AVX2-NEXT:    .cfi_offset %rbp, -16
-; X64-AVX2-NEXT:    movq %rsp, %rbp
-; X64-AVX2-NEXT:  Lcfi2:
-; X64-AVX2-NEXT:    .cfi_def_cfa_register %rbp
-; X64-AVX2-NEXT:    andq $-32, %rsp
-; X64-AVX2-NEXT:    subq $128, %rsp
-; X64-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovaps %ymm0, (%rsp)
-; X64-AVX2-NEXT:    movb (%rdi), %al
-; X64-AVX2-NEXT:    vmovd %eax, %xmm1
-; X64-AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
-; X64-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT:    movq %rbp, %rsp
-; X64-AVX2-NEXT:    popq %rbp
-; X64-AVX2-NEXT:    vzeroupper
-; X64-AVX2-NEXT:    retq
-;
-; X32-AVX512VL-LABEL: isel_crash_32b:
-; X32-AVX512VL:       ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT:    pushl %ebp
-; X32-AVX512VL-NEXT:  Lcfi1:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT:  Lcfi2:
-; X32-AVX512VL-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT:    movl %esp, %ebp
-; X32-AVX512VL-NEXT:  Lcfi3:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT:    andl $-32, %esp
-; X32-AVX512VL-NEXT:    subl $128, %esp
-; X32-AVX512VL-NEXT:    movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT:    vpbroadcastb (%eax), %ymm1
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    movl %ebp, %esp
-; X32-AVX512VL-NEXT:    popl %ebp
-; X32-AVX512VL-NEXT:    vzeroupper
-; X32-AVX512VL-NEXT:    retl
+; X32-LABEL: isel_crash_32b:
+; X32:       ## BB#0: ## %eintry
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:  Lcfi1:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:  Lcfi2:
+; X32-NEXT:    .cfi_offset %ebp, -8
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:  Lcfi3:
+; X32-NEXT:    .cfi_def_cfa_register %ebp
+; X32-NEXT:    andl $-32, %esp
+; X32-NEXT:    subl $128, %esp
+; X32-NEXT:    movl 8(%ebp), %eax
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vmovaps %ymm0, (%esp)
+; X32-NEXT:    vpbroadcastb (%eax), %ymm1
+; X32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
 ;
-; X64-AVX512VL-LABEL: isel_crash_32b:
-; X64-AVX512VL:       ## BB#0: ## %eintry
-; X64-AVX512VL-NEXT:    pushq %rbp
-; X64-AVX512VL-NEXT:  Lcfi0:
-; X64-AVX512VL-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX512VL-NEXT:  Lcfi1:
-; X64-AVX512VL-NEXT:    .cfi_offset %rbp, -16
-; X64-AVX512VL-NEXT:    movq %rsp, %rbp
-; X64-AVX512VL-NEXT:  Lcfi2:
-; X64-AVX512VL-NEXT:    .cfi_def_cfa_register %rbp
-; X64-AVX512VL-NEXT:    andq $-32, %rsp
-; X64-AVX512VL-NEXT:    subq $128, %rsp
-; X64-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT:    vmovaps %ymm0, (%rsp)
-; X64-AVX512VL-NEXT:    movb (%rdi), %al
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X64-AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
-; X64-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT:    movq %rbp, %rsp
-; X64-AVX512VL-NEXT:    popq %rbp
-; X64-AVX512VL-NEXT:    vzeroupper
-; X64-AVX512VL-NEXT:    retq
+; X64-LABEL: isel_crash_32b:
+; X64:       ## BB#0: ## %eintry
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:  Lcfi0:
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:  Lcfi1:
+; X64-NEXT:    .cfi_offset %rbp, -16
+; X64-NEXT:    movq %rsp, %rbp
+; X64-NEXT:  Lcfi2:
+; X64-NEXT:    .cfi_def_cfa_register %rbp
+; X64-NEXT:    andq $-32, %rsp
+; X64-NEXT:    subq $128, %rsp
+; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT:    vmovaps %ymm0, (%rsp)
+; X64-NEXT:    movb (%rdi), %al
+; X64-NEXT:    vmovd %eax, %xmm1
+; X64-NEXT:    vpbroadcastb %xmm1, %ymm1
+; X64-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; X64-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
+; X64-NEXT:    movq %rbp, %rsp
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
 eintry:
   %__a.addr.i = alloca <4 x i64>, align 16
   %__b.addr.i = alloca <4 x i64>, align 16
@@ -1284,99 +1237,52 @@ entry:
 }
 
 define void @isel_crash_16w(i16* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_16w:
-; X32-AVX2:       ## BB#0: ## %eintry
-; X32-AVX2-NEXT:    pushl %ebp
-; X32-AVX2-NEXT:  Lcfi5:
-; X32-AVX2-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT:  Lcfi6:
-; X32-AVX2-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX2-NEXT:    movl %esp, %ebp
-; X32-AVX2-NEXT:  Lcfi7:
-; X32-AVX2-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT:    andl $-32, %esp
-; X32-AVX2-NEXT:    subl $128, %esp
-; X32-AVX2-NEXT:    movl 8(%ebp), %eax
-; X32-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT:    vpbroadcastw (%eax), %ymm1
-; X32-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    movl %ebp, %esp
-; X32-AVX2-NEXT:    popl %ebp
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: isel_crash_16w:
-; X64-AVX2:       ## BB#0: ## %eintry
-; X64-AVX2-NEXT:    pushq %rbp
-; X64-AVX2-NEXT:  Lcfi3:
-; X64-AVX2-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX2-NEXT:  Lcfi4:
-; X64-AVX2-NEXT:    .cfi_offset %rbp, -16
-; X64-AVX2-NEXT:    movq %rsp, %rbp
-; X64-AVX2-NEXT:  Lcfi5:
-; X64-AVX2-NEXT:    .cfi_def_cfa_register %rbp
-; X64-AVX2-NEXT:    andq $-32, %rsp
-; X64-AVX2-NEXT:    subq $128, %rsp
-; X64-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovaps %ymm0, (%rsp)
-; X64-AVX2-NEXT:    movw (%rdi), %ax
-; X64-AVX2-NEXT:    vmovd %eax, %xmm1
-; X64-AVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
-; X64-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT:    movq %rbp, %rsp
-; X64-AVX2-NEXT:    popq %rbp
-; X64-AVX2-NEXT:    vzeroupper
-; X64-AVX2-NEXT:    retq
-;
-; X32-AVX512VL-LABEL: isel_crash_16w:
-; X32-AVX512VL:       ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT:    pushl %ebp
-; X32-AVX512VL-NEXT:  Lcfi5:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT:  Lcfi6:
-; X32-AVX512VL-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT:    movl %esp, %ebp
-; X32-AVX512VL-NEXT:  Lcfi7:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT:    andl $-32, %esp
-; X32-AVX512VL-NEXT:    subl $128, %esp
-; X32-AVX512VL-NEXT:    movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT:    vpbroadcastw (%eax), %ymm1
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    movl %ebp, %esp
-; X32-AVX512VL-NEXT:    popl %ebp
-; X32-AVX512VL-NEXT:    vzeroupper
-; X32-AVX512VL-NEXT:    retl
+; X32-LABEL: isel_crash_16w:
+; X32:       ## BB#0: ## %eintry
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:  Lcfi5:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:  Lcfi6:
+; X32-NEXT:    .cfi_offset %ebp, -8
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:  Lcfi7:
+; X32-NEXT:    .cfi_def_cfa_register %ebp
+; X32-NEXT:    andl $-32, %esp
+; X32-NEXT:    subl $128, %esp
+; X32-NEXT:    movl 8(%ebp), %eax
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vmovaps %ymm0, (%esp)
+; X32-NEXT:    vpbroadcastw (%eax), %ymm1
+; X32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
 ;
-; X64-AVX512VL-LABEL: isel_crash_16w:
-; X64-AVX512VL:       ## BB#0: ## %eintry
-; X64-AVX512VL-NEXT:    pushq %rbp
-; X64-AVX512VL-NEXT:  Lcfi3:
-; X64-AVX512VL-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX512VL-NEXT:  Lcfi4:
-; X64-AVX512VL-NEXT:    .cfi_offset %rbp, -16
-; X64-AVX512VL-NEXT:    movq %rsp, %rbp
-; X64-AVX512VL-NEXT:  Lcfi5:
-; X64-AVX512VL-NEXT:    .cfi_def_cfa_register %rbp
-; X64-AVX512VL-NEXT:    andq $-32, %rsp
-; X64-AVX512VL-NEXT:    subq $128, %rsp
-; X64-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT:    vmovaps %ymm0, (%rsp)
-; X64-AVX512VL-NEXT:    movw (%rdi), %ax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X64-AVX512VL-NEXT:    vpbroadcastw %xmm1, %ymm1
-; X64-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT:    movq %rbp, %rsp
-; X64-AVX512VL-NEXT:    popq %rbp
-; X64-AVX512VL-NEXT:    vzeroupper
-; X64-AVX512VL-NEXT:    retq
+; X64-LABEL: isel_crash_16w:
+; X64:       ## BB#0: ## %eintry
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:  Lcfi3:
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:  Lcfi4:
+; X64-NEXT:    .cfi_offset %rbp, -16
+; X64-NEXT:    movq %rsp, %rbp
+; X64-NEXT:  Lcfi5:
+; X64-NEXT:    .cfi_def_cfa_register %rbp
+; X64-NEXT:    andq $-32, %rsp
+; X64-NEXT:    subq $128, %rsp
+; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT:    vmovaps %ymm0, (%rsp)
+; X64-NEXT:    movw (%rdi), %ax
+; X64-NEXT:    vmovd %eax, %xmm1
+; X64-NEXT:    vpbroadcastw %xmm1, %ymm1
+; X64-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; X64-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%rsp)
+; X64-NEXT:    movq %rbp, %rsp
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
 eintry:
   %__a.addr.i = alloca <4 x i64>, align 16
   %__b.addr.i = alloca <4 x i64>, align 16
@@ -1443,28 +1349,28 @@ entry:
 }
 
 define void @isel_crash_8d(i32* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_8d:
-; X32-AVX2:       ## BB#0: ## %eintry
-; X32-AVX2-NEXT:    pushl %ebp
-; X32-AVX2-NEXT:  Lcfi9:
-; X32-AVX2-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT:  Lcfi10:
-; X32-AVX2-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX2-NEXT:    movl %esp, %ebp
-; X32-AVX2-NEXT:  Lcfi11:
-; X32-AVX2-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT:    andl $-32, %esp
-; X32-AVX2-NEXT:    subl $128, %esp
-; X32-AVX2-NEXT:    movl 8(%ebp), %eax
-; X32-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT:    vbroadcastss (%eax), %ymm1
-; X32-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    movl %ebp, %esp
-; X32-AVX2-NEXT:    popl %ebp
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X32-LABEL: isel_crash_8d:
+; X32:       ## BB#0: ## %eintry
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:  Lcfi9:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:  Lcfi10:
+; X32-NEXT:    .cfi_offset %ebp, -8
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:  Lcfi11:
+; X32-NEXT:    .cfi_def_cfa_register %ebp
+; X32-NEXT:    andl $-32, %esp
+; X32-NEXT:    subl $128, %esp
+; X32-NEXT:    movl 8(%ebp), %eax
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vmovaps %ymm0, (%esp)
+; X32-NEXT:    vbroadcastss (%eax), %ymm1
+; X32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: isel_crash_8d:
 ; X64-AVX2:       ## BB#0: ## %eintry
@@ -1490,29 +1396,6 @@ define void @isel_crash_8d(i32* %cV_R.ad
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
 ;
-; X32-AVX512VL-LABEL: isel_crash_8d:
-; X32-AVX512VL:       ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT:    pushl %ebp
-; X32-AVX512VL-NEXT:  Lcfi9:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT:  Lcfi10:
-; X32-AVX512VL-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT:    movl %esp, %ebp
-; X32-AVX512VL-NEXT:  Lcfi11:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT:    andl $-32, %esp
-; X32-AVX512VL-NEXT:    subl $128, %esp
-; X32-AVX512VL-NEXT:    movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT:    vbroadcastss (%eax), %ymm1
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    movl %ebp, %esp
-; X32-AVX512VL-NEXT:    popl %ebp
-; X32-AVX512VL-NEXT:    vzeroupper
-; X32-AVX512VL-NEXT:    retl
-;
 ; X64-AVX512VL-LABEL: isel_crash_8d:
 ; X64-AVX512VL:       ## BB#0: ## %eintry
 ; X64-AVX512VL-NEXT:    pushq %rbp
@@ -1525,7 +1408,7 @@ define void @isel_crash_8d(i32* %cV_R.ad
 ; X64-AVX512VL-NEXT:    .cfi_def_cfa_register %rbp
 ; X64-AVX512VL-NEXT:    andq $-32, %rsp
 ; X64-AVX512VL-NEXT:    subq $128, %rsp
-; X64-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX512VL-NEXT:    vmovaps %ymm0, (%rsp)
 ; X64-AVX512VL-NEXT:    movl (%rdi), %eax
 ; X64-AVX512VL-NEXT:    vpbroadcastd %eax, %ymm1
@@ -1605,34 +1488,34 @@ entry:
 }
 
 define void @isel_crash_4q(i64* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_4q:
-; X32-AVX2:       ## BB#0: ## %eintry
-; X32-AVX2-NEXT:    pushl %ebp
-; X32-AVX2-NEXT:  Lcfi13:
-; X32-AVX2-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT:  Lcfi14:
-; X32-AVX2-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX2-NEXT:    movl %esp, %ebp
-; X32-AVX2-NEXT:  Lcfi15:
-; X32-AVX2-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT:    andl $-32, %esp
-; X32-AVX2-NEXT:    subl $128, %esp
-; X32-AVX2-NEXT:    movl 8(%ebp), %eax
-; X32-AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT:    movl (%eax), %ecx
-; X32-AVX2-NEXT:    movl 4(%eax), %eax
-; X32-AVX2-NEXT:    vmovd %ecx, %xmm1
-; X32-AVX2-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; X32-AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT:    movl %ebp, %esp
-; X32-AVX2-NEXT:    popl %ebp
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X32-LABEL: isel_crash_4q:
+; X32:       ## BB#0: ## %eintry
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:  Lcfi13:
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:  Lcfi14:
+; X32-NEXT:    .cfi_offset %ebp, -8
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:  Lcfi15:
+; X32-NEXT:    .cfi_def_cfa_register %ebp
+; X32-NEXT:    andl $-32, %esp
+; X32-NEXT:    subl $128, %esp
+; X32-NEXT:    movl 8(%ebp), %eax
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vmovaps %ymm0, (%esp)
+; X32-NEXT:    movl (%eax), %ecx
+; X32-NEXT:    movl 4(%eax), %eax
+; X32-NEXT:    vmovd %ecx, %xmm1
+; X32-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
+; X32-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm1
+; X32-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
+; X32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: isel_crash_4q:
 ; X64-AVX2:       ## BB#0: ## %eintry
@@ -1658,35 +1541,6 @@ define void @isel_crash_4q(i64* %cV_R.ad
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
 ;
-; X32-AVX512VL-LABEL: isel_crash_4q:
-; X32-AVX512VL:       ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT:    pushl %ebp
-; X32-AVX512VL-NEXT:  Lcfi13:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT:  Lcfi14:
-; X32-AVX512VL-NEXT:    .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT:    movl %esp, %ebp
-; X32-AVX512VL-NEXT:  Lcfi15:
-; X32-AVX512VL-NEXT:    .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT:    andl $-32, %esp
-; X32-AVX512VL-NEXT:    subl $128, %esp
-; X32-AVX512VL-NEXT:    movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT:    movl (%eax), %ecx
-; X32-AVX512VL-NEXT:    movl 4(%eax), %eax
-; X32-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X32-AVX512VL-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X32-AVX512VL-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
-; X32-AVX512VL-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm1
-; X32-AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; X32-AVX512VL-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT:    movl %ebp, %esp
-; X32-AVX512VL-NEXT:    popl %ebp
-; X32-AVX512VL-NEXT:    vzeroupper
-; X32-AVX512VL-NEXT:    retl
-;
 ; X64-AVX512VL-LABEL: isel_crash_4q:
 ; X64-AVX512VL:       ## BB#0: ## %eintry
 ; X64-AVX512VL-NEXT:    pushq %rbp
@@ -1699,7 +1553,7 @@ define void @isel_crash_4q(i64* %cV_R.ad
 ; X64-AVX512VL-NEXT:    .cfi_def_cfa_register %rbp
 ; X64-AVX512VL-NEXT:    andq $-32, %rsp
 ; X64-AVX512VL-NEXT:    subq $128, %rsp
-; X64-AVX512VL-NEXT:    vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX512VL-NEXT:    vmovaps %ymm0, (%rsp)
 ; X64-AVX512VL-NEXT:    movq (%rdi), %rax
 ; X64-AVX512VL-NEXT:    vpbroadcastq %rax, %ymm1

Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Thu Aug  3 01:50:18 2017
@@ -401,7 +401,7 @@ define <16 x i32> @vpaddd_broadcast_test
 define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_mask_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -414,7 +414,7 @@ define <16 x i32> @vpaddd_mask_test(<16
 define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_maskz_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -427,7 +427,7 @@ define <16 x i32> @vpaddd_maskz_test(<16
 define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_mask_fold_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -441,7 +441,7 @@ define <16 x i32> @vpaddd_mask_fold_test
 define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_mask_broadcast_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -454,7 +454,7 @@ define <16 x i32> @vpaddd_mask_broadcast
 define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_maskz_fold_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -468,7 +468,7 @@ define <16 x i32> @vpaddd_maskz_fold_tes
 define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd_maskz_broadcast_test:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -671,7 +671,7 @@ entry:
 define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vaddps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vaddps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -686,7 +686,7 @@ define <16 x float> @test_mask_vaddps(<1
 define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vmulps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vmulps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -701,7 +701,7 @@ define <16 x float> @test_mask_vmulps(<1
 define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vminps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vminps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -725,7 +725,7 @@ define <8 x double> @test_mask_vminpd(<8
 ;
 ; AVX512VL-LABEL: test_mask_vminpd:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -748,7 +748,7 @@ define <8 x double> @test_mask_vminpd(<8
 ;
 ; SKX-LABEL: test_mask_vminpd:
 ; SKX:       # BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -764,7 +764,7 @@ define <8 x double> @test_mask_vminpd(<8
 define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vmaxps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vmaxps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -788,7 +788,7 @@ define <8 x double> @test_mask_vmaxpd(<8
 ;
 ; AVX512VL-LABEL: test_mask_vmaxpd:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -811,7 +811,7 @@ define <8 x double> @test_mask_vmaxpd(<8
 ;
 ; SKX-LABEL: test_mask_vmaxpd:
 ; SKX:       # BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -827,7 +827,7 @@ define <8 x double> @test_mask_vmaxpd(<8
 define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vsubps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vsubps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -842,7 +842,7 @@ define <16 x float> @test_mask_vsubps(<1
 define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
 ; CHECK-LABEL: test_mask_vdivps:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vdivps %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -857,7 +857,7 @@ define <16 x float> @test_mask_vdivps(<1
 define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
 ; CHECK-LABEL: test_mask_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpcmpneqq %zmm4, %zmm3, %k1
 ; CHECK-NEXT:    vaddpd %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -872,7 +872,7 @@ define <8 x double> @test_mask_vaddpd(<8
 define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
 ; CHECK-LABEL: test_maskz_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1
 ; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -886,7 +886,7 @@ define <8 x double> @test_maskz_vaddpd(<
 define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
 ; CHECK-LABEL: test_mask_fold_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1
 ; CHECK-NEXT:    vaddpd (%rdi), %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -902,7 +902,7 @@ define <8 x double> @test_mask_fold_vadd
 define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
 ; CHECK-LABEL: test_maskz_fold_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -930,7 +930,7 @@ define <8 x double> @test_broadcast_vadd
 define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
 ; CHECK-LABEL: test_mask_broadcast_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vpcmpneqq %zmm0, %zmm2, %k1
 ; CHECK-NEXT:    vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %zmm1, %zmm0
@@ -949,7 +949,7 @@ define <8 x double> @test_mask_broadcast
 define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
 ; CHECK-LABEL: test_maskz_broadcast_vaddpd:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll Thu Aug  3 01:50:18 2017
@@ -16,7 +16,7 @@ define <16 x float> @test3(<4 x float> %
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
 ; CHECK-NEXT:    vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Thu Aug  3 01:50:18 2017
@@ -1077,7 +1077,7 @@ define double @uitofp03(i32 %a) nounwind
 define <16 x float> @sitofp_16i1_float(<16 x i32> %a) {
 ; NODQ-LABEL: sitofp_16i1_float:
 ; NODQ:       # BB#0:
-; NODQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NODQ-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; NODQ-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; NODQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
@@ -1085,7 +1085,7 @@ define <16 x float> @sitofp_16i1_float(<
 ;
 ; DQ-LABEL: sitofp_16i1_float:
 ; DQ:       # BB#0:
-; DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; DQ-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
 ; DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; DQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
@@ -1140,7 +1140,7 @@ define <8 x double> @sitofp_8i8_double(<
 define <16 x double> @sitofp_16i1_double(<16 x double> %a) {
 ; NOVLDQ-LABEL: sitofp_16i1_double:
 ; NOVLDQ:       # BB#0:
-; NOVLDQ-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NOVLDQ-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; NOVLDQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k1
 ; NOVLDQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k2
 ; NOVLDQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
@@ -1153,7 +1153,7 @@ define <16 x double> @sitofp_16i1_double
 ;
 ; VLDQ-LABEL: sitofp_16i1_double:
 ; VLDQ:       # BB#0:
-; VLDQ-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
+; VLDQ-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; VLDQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
 ; VLDQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
 ; VLDQ-NEXT:    vpmovm2d %k1, %ymm0
@@ -1164,7 +1164,7 @@ define <16 x double> @sitofp_16i1_double
 ;
 ; VLNODQ-LABEL: sitofp_16i1_double:
 ; VLNODQ:       # BB#0:
-; VLNODQ-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; VLNODQ-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; VLNODQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k1
 ; VLNODQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k2
 ; VLNODQ-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -1176,7 +1176,7 @@ define <16 x double> @sitofp_16i1_double
 ;
 ; AVX512DQ-LABEL: sitofp_16i1_double:
 ; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512DQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
 ; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
 ; AVX512DQ-NEXT:    vpmovm2d %k1, %zmm0
@@ -1192,7 +1192,7 @@ define <16 x double> @sitofp_16i1_double
 define <8 x double> @sitofp_8i1_double(<8 x double> %a) {
 ; NOVLDQ-LABEL: sitofp_8i1_double:
 ; NOVLDQ:       # BB#0:
-; NOVLDQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NOVLDQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; NOVLDQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; NOVLDQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; NOVLDQ-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1201,7 +1201,7 @@ define <8 x double> @sitofp_8i1_double(<
 ;
 ; VLDQ-LABEL: sitofp_8i1_double:
 ; VLDQ:       # BB#0:
-; VLDQ-NEXT:    vxorpd %zmm1, %zmm1, %zmm1
+; VLDQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; VLDQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
 ; VLDQ-NEXT:    vpmovm2d %k0, %ymm0
 ; VLDQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
@@ -1209,7 +1209,7 @@ define <8 x double> @sitofp_8i1_double(<
 ;
 ; VLNODQ-LABEL: sitofp_8i1_double:
 ; VLNODQ:       # BB#0:
-; VLNODQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; VLNODQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; VLNODQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; VLNODQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; VLNODQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -1218,7 +1218,7 @@ define <8 x double> @sitofp_8i1_double(<
 ;
 ; AVX512DQ-LABEL: sitofp_8i1_double:
 ; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    vxorpd %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
 ; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
@@ -1241,7 +1241,7 @@ define <8 x float> @sitofp_8i1_float(<8
 ;
 ; VLDQ-LABEL: sitofp_8i1_float:
 ; VLDQ:       # BB#0:
-; VLDQ-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; VLDQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; VLDQ-NEXT:    vcmpltps %ymm0, %ymm1, %k0
 ; VLDQ-NEXT:    vpmovm2d %k0, %ymm0
 ; VLDQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
@@ -1249,7 +1249,7 @@ define <8 x float> @sitofp_8i1_float(<8
 ;
 ; VLNODQ-LABEL: sitofp_8i1_float:
 ; VLNODQ:       # BB#0:
-; VLNODQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VLNODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VLNODQ-NEXT:    vcmpltps %ymm0, %ymm1, %k1
 ; VLNODQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; VLNODQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -1309,7 +1309,7 @@ define <4 x double> @sitofp_4i1_double(<
 ;
 ; VLDQ-LABEL: sitofp_4i1_double:
 ; VLDQ:       # BB#0:
-; VLDQ-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; VLDQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; VLDQ-NEXT:    vcmpltpd %ymm0, %ymm1, %k0
 ; VLDQ-NEXT:    vpmovm2d %k0, %xmm0
 ; VLDQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
@@ -1317,7 +1317,7 @@ define <4 x double> @sitofp_4i1_double(<
 ;
 ; VLNODQ-LABEL: sitofp_4i1_double:
 ; VLNODQ:       # BB#0:
-; VLNODQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VLNODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VLNODQ-NEXT:    vcmpltpd %ymm0, %ymm1, %k1
 ; VLNODQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; VLNODQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -1414,7 +1414,7 @@ define <16 x float> @uitofp_16i16(<16 x
 define <16 x float> @uitofp_16i1_float(<16 x i32> %a) {
 ; ALL-LABEL: uitofp_16i1_float:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; ALL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
 ; ALL-NEXT:    vcvtudq2ps %zmm0, %zmm0
@@ -1427,7 +1427,7 @@ define <16 x float> @uitofp_16i1_float(<
 define <16 x double> @uitofp_16i1_double(<16 x i32> %a) {
 ; NOVL-LABEL: uitofp_16i1_double:
 ; NOVL:       # BB#0:
-; NOVL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NOVL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; NOVL-NEXT:    movq {{.*}}(%rip), %rax
 ; NOVL-NEXT:    vpbroadcastq %rax, %zmm0 {%k1} {z}
@@ -1441,7 +1441,7 @@ define <16 x double> @uitofp_16i1_double
 ;
 ; VL-LABEL: uitofp_16i1_double:
 ; VL:       # BB#0:
-; VL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; VL-NEXT:    movl {{.*}}(%rip), %eax
 ; VL-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
@@ -1469,7 +1469,7 @@ define <8 x float> @uitofp_8i1_float(<8
 ;
 ; VL-LABEL: uitofp_8i1_float:
 ; VL:       # BB#0:
-; VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
 ; VL-NEXT:    vcvtudq2ps %ymm0, %ymm0
@@ -1492,7 +1492,7 @@ define <8 x double> @uitofp_8i1_double(<
 ;
 ; VL-LABEL: uitofp_8i1_double:
 ; VL:       # BB#0:
-; VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
 ; VL-NEXT:    vcvtudq2pd %ymm0, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll Thu Aug  3 01:50:18 2017
@@ -254,7 +254,7 @@ define void @scatter_mask_qps_execdomain
 define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf)  {
 ; CHECK-LABEL: gather_qps:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vgatherqps (%rdi,%zmm0,4), %ymm1 {%k2}
@@ -333,7 +333,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
-; CHECK-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -351,7 +351,7 @@ define <4 x i64>@test_int_x86_avx512_gat
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm2 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -476,7 +476,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
-; CHECK-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -545,7 +545,7 @@ define <8 x float>@test_int_x86_avx512_g
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
-; CHECK-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -847,7 +847,7 @@ define <16 x float> @gather_mask_test(<1
 ; CHECK-LABEL: gather_mask_test:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
-; CHECK-NEXT:    vxorps %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}
 ; CHECK-NEXT:    kxorw %k0, %k0, %k1
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -4131,7 +4131,7 @@ define <8 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
 ; CHECK-NEXT:    vaddpd %zmm4, %zmm3, %zmm3
 ; CHECK-NEXT:    vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
@@ -4153,7 +4153,7 @@ define <8 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vmovapd %zmm0, %zmm5
 ; CHECK-NEXT:    vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
 ; CHECK-NEXT:    vaddpd %zmm5, %zmm3, %zmm3
@@ -4222,7 +4222,7 @@ define <16 x float>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vmovaps %zmm0, %zmm5
 ; CHECK-NEXT:    vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
 ; CHECK-NEXT:    vaddps %zmm5, %zmm3, %zmm3
@@ -4247,7 +4247,7 @@ define <16 x float>@test_int_x86_avx512_
 ; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3
 ; CHECK-NEXT:    vmovaps %zmm0, %zmm4
 ; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vaddps %zmm0, %zmm4, %zmm0
 ; CHECK-NEXT:    vaddps %zmm3, %zmm0, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll Thu Aug  3 01:50:18 2017
@@ -661,7 +661,7 @@ false:
 define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
 ; KNL-LABEL: test8:
 ; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    jg LBB17_1
 ; KNL-NEXT:  ## BB#2:
@@ -676,7 +676,7 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    jg LBB17_1
 ; SKX-NEXT:  ## BB#2:
@@ -692,7 +692,7 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ;
 ; AVX512BW-LABEL: test8:
 ; AVX512BW:       ## BB#0:
-; AVX512BW-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-NEXT:    cmpl %esi, %edi
 ; AVX512BW-NEXT:    jg LBB17_1
 ; AVX512BW-NEXT:  ## BB#2:
@@ -708,7 +708,7 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ;
 ; AVX512DQ-LABEL: test8:
 ; AVX512DQ:       ## BB#0:
-; AVX512DQ-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512DQ-NEXT:    cmpl %esi, %edi
 ; AVX512DQ-NEXT:    jg LBB17_1
 ; AVX512DQ-NEXT:  ## BB#2:
@@ -3602,7 +3602,7 @@ define void @store_64i1(<64 x i1>* %a, <
 define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
 ; KNL-LABEL: test_bitcast_v8i1_zext:
 ; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; KNL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    movzbl %al, %eax
@@ -3611,7 +3611,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ;
 ; SKX-LABEL: test_bitcast_v8i1_zext:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kmovb %k0, %eax
 ; SKX-NEXT:    addl %eax, %eax
@@ -3620,7 +3620,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ;
 ; AVX512BW-LABEL: test_bitcast_v8i1_zext:
 ; AVX512BW:       ## BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
 ; AVX512BW-NEXT:    movzbl %al, %eax
@@ -3630,7 +3630,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ;
 ; AVX512DQ-LABEL: test_bitcast_v8i1_zext:
 ; AVX512DQ:       ## BB#0:
-; AVX512DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kmovb %k0, %eax
 ; AVX512DQ-NEXT:    addl %eax, %eax
@@ -3647,7 +3647,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
 ; KNL-LABEL: test_bitcast_v16i1_zext:
 ; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; KNL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    addl %eax, %eax
@@ -3655,7 +3655,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 ;
 ; SKX-LABEL: test_bitcast_v16i1_zext:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kmovw %k0, %eax
 ; SKX-NEXT:    addl %eax, %eax
@@ -3664,7 +3664,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 ;
 ; AVX512BW-LABEL: test_bitcast_v16i1_zext:
 ; AVX512BW:       ## BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kmovw %k0, %eax
 ; AVX512BW-NEXT:    addl %eax, %eax
@@ -3673,7 +3673,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 ;
 ; AVX512DQ-LABEL: test_bitcast_v16i1_zext:
 ; AVX512DQ:       ## BB#0:
-; AVX512DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kmovw %k0, %eax
 ; AVX512DQ-NEXT:    addl %eax, %eax

Modified: llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll Thu Aug  3 01:50:18 2017
@@ -5,7 +5,7 @@
 define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
 ; AVX512-LABEL: test1:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z}
 ; AVX512-NEXT:    retq
@@ -17,7 +17,7 @@ define <16 x i32> @test1(<16 x i32> %tri
 define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
 ; AVX512-LABEL: test2:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z}
 ; AVX512-NEXT:    retq
@@ -29,7 +29,7 @@ define <16 x i32> @test2(<16 x i32> %tri
 define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
 ; AVX512-LABEL: test3:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 %zmm1, (%rdi) {%k1}
 ; AVX512-NEXT:    vzeroupper
@@ -42,7 +42,7 @@ define void @test3(<16 x i32> %trigger,
 define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
 ; AVX512-LABEL: test4:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vblendmps (%rdi), %zmm1, %zmm0 {%k1}
 ; AVX512-NEXT:    retq
@@ -54,7 +54,7 @@ define <16 x float> @test4(<16 x i32> %t
 define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
 ; AVX512-LABEL: test13:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vmovups %zmm1, (%rdi) {%k1}
 ; AVX512-NEXT:    vzeroupper
@@ -99,7 +99,7 @@ declare <16 x i32*> @llvm.masked.load.v1
 define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
 ; AVX512-LABEL: test23:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqq %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vpcmpeqq %zmm2, %zmm1, %k2
 ; AVX512-NEXT:    vmovdqu64 64(%rdi), %zmm1 {%k2} {z}

Modified: llvm/trunk/test/CodeGen/X86/avx512-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mov.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mov.ll Thu Aug  3 01:50:18 2017
@@ -311,7 +311,7 @@ define <16 x float> @test31(i8 * %addr)
 define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; CHECK-LABEL: test32:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -325,7 +325,7 @@ define <16 x i32> @test32(i8 * %addr, <1
 define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; CHECK-LABEL: test33:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x49,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -339,7 +339,7 @@ define <16 x i32> @test33(i8 * %addr, <1
 define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
 ; CHECK-LABEL: test34:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -353,7 +353,7 @@ define <16 x i32> @test34(i8 * %addr, <1
 define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
 ; CHECK-LABEL: test35:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xc9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -367,7 +367,7 @@ define <16 x i32> @test35(i8 * %addr, <1
 define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; CHECK-LABEL: test36:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -381,7 +381,7 @@ define <8 x i64> @test36(i8 * %addr, <8
 define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; CHECK-LABEL: test37:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x49,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -395,7 +395,7 @@ define <8 x i64> @test37(i8 * %addr, <8
 define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
 ; CHECK-LABEL: test38:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -409,7 +409,7 @@ define <8 x i64> @test38(i8 * %addr, <8
 define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
 ; CHECK-LABEL: test39:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xc9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -423,7 +423,7 @@ define <8 x i64> @test39(i8 * %addr, <8
 define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; CHECK-LABEL: test40:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
 ; CHECK-NEXT:    vcmpordps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqps %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x49,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x28,0x07]
@@ -438,7 +438,7 @@ define <16 x float> @test40(i8 * %addr,
 define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; CHECK-LABEL: test41:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
 ; CHECK-NEXT:    vcmpordps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqps %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x49,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x10,0x07]
@@ -453,7 +453,7 @@ define <16 x float> @test41(i8 * %addr,
 define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
 ; CHECK-LABEL: test42:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
 ; CHECK-NEXT:    vcmpordps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqps %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x28,0x07]
@@ -468,7 +468,7 @@ define <16 x float> @test42(i8 * %addr,
 define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
 ; CHECK-LABEL: test43:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
 ; CHECK-NEXT:    vcmpordps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqps %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x10,0x07]
@@ -483,7 +483,7 @@ define <16 x float> @test43(i8 * %addr,
 define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; CHECK-LABEL: test44:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
 ; CHECK-NEXT:    vcmpordpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqpd %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x28,0x07]
@@ -498,7 +498,7 @@ define <8 x double> @test44(i8 * %addr,
 define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; CHECK-LABEL: test45:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
 ; CHECK-NEXT:    vcmpordpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqpd %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x10,0x07]
@@ -513,7 +513,7 @@ define <8 x double> @test45(i8 * %addr,
 define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
 ; CHECK-LABEL: test46:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
 ; CHECK-NEXT:    vcmpordpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqpd %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x28,0x07]
@@ -528,7 +528,7 @@ define <8 x double> @test46(i8 * %addr,
 define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) {
 ; CHECK-LABEL: test47:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1 ## encoding: [0x62,0xf1,0x75,0x48,0xef,0xc9]
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
 ; CHECK-NEXT:    vcmpordpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqpd %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x10,0x07]

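[Note: in the floating-point tests above (test40 through test47) the zero register feeding the vcmp*ps/vcmp*pd compares is now built with vxorps/vxorpd rather than vpxor. All are equivalent 4-byte VEX zeroing idioms; using the FP-domain form next to FP consumers presumably avoids an integer-to-FP domain crossing on microarchitectures that penalize it — a plausible rationale, not something the tests verify:

    vxorps %xmm2, %xmm2, %xmm2  ## c5 e8 57 d2, feeds vcmp*ps
    vxorpd %xmm2, %xmm2, %xmm2  ## c5 e9 57 d2, feeds vcmp*pd]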
Modified: llvm/trunk/test/CodeGen/X86/avx512-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-select.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-select.ll Thu Aug  3 01:50:18 2017
@@ -6,7 +6,7 @@ define <16 x i32> @select00(i32 %a, <16
 ; X86-LABEL: select00:
 ; X86:       # BB#0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
-; X86-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB0_2
 ; X86-NEXT:  # BB#1:
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1
@@ -16,7 +16,7 @@ define <16 x i32> @select00(i32 %a, <16
 ;
 ; X64-LABEL: select00:
 ; X64:       # BB#0:
-; X64-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    je .LBB0_2
 ; X64-NEXT:  # BB#1:
@@ -34,7 +34,7 @@ define <8 x i64> @select01(i32 %a, <8 x
 ; X86-LABEL: select01:
 ; X86:       # BB#0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
-; X86-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB1_2
 ; X86-NEXT:  # BB#1:
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1
@@ -44,7 +44,7 @@ define <8 x i64> @select01(i32 %a, <8 x
 ;
 ; X64-LABEL: select01:
 ; X64:       # BB#0:
-; X64-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    je .LBB1_2
 ; X64-NEXT:  # BB#1:

Modified: llvm/trunk/test/CodeGen/X86/avx512-skx-insert-subvec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-skx-insert-subvec.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-skx-insert-subvec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-skx-insert-subvec.ll Thu Aug  3 01:50:18 2017
@@ -31,7 +31,7 @@ define <8 x i1> @test2(<2 x i1> %a) {
 ; CHECK-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; CHECK-NEXT:    vptestmq %xmm0, %xmm0, %k0
 ; CHECK-NEXT:    vpmovm2q %k0, %zmm0
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; CHECK-NEXT:    vpmovq2m %zmm0, %k0
 ; CHECK-NEXT:    vpmovm2w %k0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll Thu Aug  3 01:50:18 2017
@@ -44,7 +44,7 @@ define   <16 x float> @_inreg16xfloat(fl
 define   <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
 ; ALL-LABEL: _ss16xfloat_mask:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; ALL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
 ; ALL-NEXT:    vmovaps %zmm1, %zmm0
@@ -59,7 +59,7 @@ define   <16 x float> @_ss16xfloat_mask(
 define   <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
 ; ALL-LABEL: _ss16xfloat_maskz:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
 ; ALL-NEXT:    retq
@@ -84,7 +84,7 @@ define   <16 x float> @_ss16xfloat_load(
 define   <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
 ; ALL-LABEL: _ss16xfloat_mask_load:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1}
 ; ALL-NEXT:    retq
@@ -99,7 +99,7 @@ define   <16 x float> @_ss16xfloat_mask_
 define   <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
 ; ALL-LABEL: _ss16xfloat_maskz_load:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
 ; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z}
 ; ALL-NEXT:    retq
@@ -216,7 +216,7 @@ define   <16 x float> @_xmm16xfloat(<16
 define <16 x i32> @test_vbroadcast() {
 ; ALL-LABEL: test_vbroadcast:
 ; ALL:       # BB#0: # %entry
-; ALL-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; ALL-NEXT:    vcmpunordps %zmm0, %zmm0, %k1
 ; ALL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; ALL-NEXT:    knotw %k1, %k1

Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll Thu Aug  3 01:50:18 2017
@@ -235,7 +235,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0
 ; X64-AVX512VL-LABEL: PR29088:
 ; X64-AVX512VL:       ## BB#0:
 ; X64-AVX512VL-NEXT:    vmovaps (%rdi), %xmm0
-; X64-AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-AVX512VL-NEXT:    vmovdqa %ymm1, (%rsi)
 ; X64-AVX512VL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512VL-NEXT:    retq
@@ -243,7 +243,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0
 ; X64-AVX512BWVL-LABEL: PR29088:
 ; X64-AVX512BWVL:       ## BB#0:
 ; X64-AVX512BWVL-NEXT:    vmovaps (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-AVX512BWVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-AVX512BWVL-NEXT:    vmovdqa %ymm1, (%rsi)
 ; X64-AVX512BWVL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512BWVL-NEXT:    retq
@@ -251,7 +251,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0
 ; X64-AVX512DQVL-LABEL: PR29088:
 ; X64-AVX512DQVL:       ## BB#0:
 ; X64-AVX512DQVL-NEXT:    vmovaps (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; X64-AVX512DQVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-AVX512DQVL-NEXT:    vmovaps %ymm1, (%rsi)
 ; X64-AVX512DQVL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512DQVL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-vselect-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vselect-crash.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vselect-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vselect-crash.ll Thu Aug  3 01:50:18 2017
@@ -4,7 +4,7 @@
 define <16 x i32> @test() {
 ; CHECK-LABEL: test:
 ; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = icmp slt <16 x i32> undef, undef

Modified: llvm/trunk/test/CodeGen/X86/avx512-vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vselect.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vselect.ll Thu Aug  3 01:50:18 2017
@@ -25,7 +25,7 @@ entry:
 define <16 x double> @test2(<16 x float> %x, <16 x float> %y, <16 x double> %a, <16 x double> %b) {
 ; CHECK-SKX-LABEL: test2:
 ; CHECK-SKX:       # BB#0: # %entry
-; CHECK-SKX-NEXT:    vxorps %zmm6, %zmm6, %zmm6
+; CHECK-SKX-NEXT:    vxorps %xmm6, %xmm6, %xmm6
 ; CHECK-SKX-NEXT:    vcmpltps %zmm0, %zmm6, %k0
 ; CHECK-SKX-NEXT:    vcmpltps %zmm6, %zmm1, %k1
 ; CHECK-SKX-NEXT:    korw %k1, %k0, %k0
@@ -40,7 +40,7 @@ define <16 x double> @test2(<16 x float>
 ;
 ; CHECK-KNL-LABEL: test2:
 ; CHECK-KNL:       # BB#0: # %entry
-; CHECK-KNL-NEXT:    vpxord %zmm6, %zmm6, %zmm6
+; CHECK-KNL-NEXT:    vxorps %xmm6, %xmm6, %xmm6
 ; CHECK-KNL-NEXT:    vcmpltps %zmm0, %zmm6, %k0
 ; CHECK-KNL-NEXT:    vcmpltps %zmm6, %zmm1, %k1
 ; CHECK-KNL-NEXT:    korw %k1, %k0, %k1

Modified: llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll Thu Aug  3 01:50:18 2017
@@ -24,7 +24,7 @@ define void @test2(i8 * %addr, <64 x i8>
 define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
 ; CHECK-LABEL: test3:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqb %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vmovdqu8 (%rdi), %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -38,7 +38,7 @@ define <64 x i8> @test3(i8 * %addr, <64
 define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
 ; CHECK-LABEL: test4:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vpcmpneqb %zmm1, %zmm0, %k1
 ; CHECK-NEXT:    vmovdqu8 (%rdi), %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -72,7 +72,7 @@ define void @test6(i8 * %addr, <32 x i16
 define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
 ; CHECK-LABEL: test7:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqw %zmm2, %zmm1, %k1
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %zmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -86,7 +86,7 @@ define <32 x i16> @test7(i8 * %addr, <32
 define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
 ; CHECK-LABEL: test8:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vpcmpneqw %zmm1, %zmm0, %k1
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll Thu Aug  3 01:50:18 2017
@@ -24,7 +24,7 @@ define void @test_256_2(i8 * %addr, <32
 define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
 ; CHECK-LABEL: test_256_3:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqb %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x3f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu8 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -38,7 +38,7 @@ define <32 x i8> @test_256_3(i8 * %addr,
 define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
 ; CHECK-LABEL: test_256_4:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqb %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x3f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu8 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -72,7 +72,7 @@ define void @test_256_6(i8 * %addr, <16
 define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
 ; CHECK-LABEL: test_256_7:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqw %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x3f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -86,7 +86,7 @@ define <16 x i16> @test_256_7(i8 * %addr
 define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
 ; CHECK-LABEL: test_256_8:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]

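[Note: for the 256-bit tests the old vpxor %ymm form was already EVEX-to-VEX compressed down to four bytes, so unlike the zmm cases there is no size saving here; the xmm form is the same length and relies on the same implicit zeroing of the upper bits:

    vpxor %ymm2, %ymm2, %ymm2  ## VEX.256, c5 ed ef d2
    vpxor %xmm2, %xmm2, %xmm2  ## VEX.128, c5 e9 ef d2

The change simply canonicalizes every zero-vector materialization to the single 128-bit idiom.]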
Modified: llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512ifma-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -11,7 +11,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm3
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1}
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %zmm0, %zmm4, %zmm0
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
@@ -39,7 +39,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm3
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vpaddq %zmm0, %zmm4, %zmm0
 ; CHECK-NEXT:    vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
@@ -67,7 +67,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm3
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1}
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %zmm0, %zmm4, %zmm0
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
@@ -95,7 +95,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm3
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vpaddq %zmm0, %zmm4, %zmm0
 ; CHECK-NEXT:    vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}

Modified: llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512ifmavl-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -40,7 +40,7 @@ define <4 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm3
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm4
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1}
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
@@ -96,7 +96,7 @@ define <4 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm3
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm4
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1} {z}
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
 ; CHECK-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
@@ -152,7 +152,7 @@ define <4 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm3
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm4
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1}
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
@@ -208,7 +208,7 @@ define <4 x i64>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm3
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm4
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1} {z}
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
 ; CHECK-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}

Modified: llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmi-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -49,7 +49,7 @@ define <64 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermi2b %zmm2, %zmm0, %zmm3 {%k1}
 ; CHECK-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpermi2b %zmm2, %zmm0, %zmm4 {%k1} {z}
 ; CHECK-NEXT:    vpaddb %zmm1, %zmm4, %zmm0
 ; CHECK-NEXT:    vpaddb %zmm0, %zmm3, %zmm0
@@ -71,7 +71,7 @@ define <64 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermt2b %zmm2, %zmm0, %zmm3 {%k1}
 ; CHECK-NEXT:    vpermt2b %zmm2, %zmm0, %zmm1
-; CHECK-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-NEXT:    vpermt2b %zmm2, %zmm0, %zmm4 {%k1} {z}
 ; CHECK-NEXT:    vpaddb %zmm1, %zmm4, %zmm0
 ; CHECK-NEXT:    vpaddb %zmm0, %zmm3, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vbmivl-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -112,7 +112,7 @@ define <32 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
 ; CHECK-NEXT:    vpermi2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x75,0xda]
 ; CHECK-NEXT:    vpermi2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x75,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpermi2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x75,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfc,0xc1]
 ; CHECK-NEXT:    vpaddb %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfc,0xc0]
@@ -156,7 +156,7 @@ define <32 x i8>@test_int_x86_avx512_mas
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7d,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfc,0xc1]
 ; CHECK-NEXT:    vpaddb %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfc,0xc0]

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll Thu Aug  3 01:50:18 2017
@@ -76,7 +76,7 @@ define <8 x i32> @vpaddd256_broadcast_te
 define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_mask_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
 ; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
 ; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -89,7 +89,7 @@ define <8 x i32> @vpaddd256_mask_test(<8
 define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_maskz_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
 ; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
 ; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -102,7 +102,7 @@ define <8 x i32> @vpaddd256_maskz_test(<
 define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_mask_fold_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -116,7 +116,7 @@ define <8 x i32> @vpaddd256_mask_fold_te
 define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_mask_broadcast_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A]
 ; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte
@@ -130,7 +130,7 @@ define <8 x i32> @vpaddd256_mask_broadca
 define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_maskz_fold_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -144,7 +144,7 @@ define <8 x i32> @vpaddd256_maskz_fold_t
 define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: vpaddd256_maskz_broadcast_test:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A]
 ; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte
@@ -216,7 +216,7 @@ define <8 x float> @test_broadcast_vaddp
 define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vaddps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -229,7 +229,7 @@ define <8 x float> @test_mask_vaddps_256
 define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vmulps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -242,7 +242,7 @@ define <8 x float> @test_mask_vmulps_256
 define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1)nounwind readnone {
 ; CHECK-LABEL: test_mask_vminps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -256,7 +256,7 @@ define <8 x float> @test_mask_vminps_256
 define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vmaxps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -270,7 +270,7 @@ define <8 x float> @test_mask_vmaxps_256
 define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vsubps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -283,7 +283,7 @@ define <8 x float> @test_mask_vsubps_256
 define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vdivps_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -296,7 +296,7 @@ define <8 x float> @test_mask_vdivps_256
 define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vmulpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -309,7 +309,7 @@ define <4 x double> @test_mask_vmulpd_25
 define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vminpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -323,7 +323,7 @@ define <4 x double> @test_mask_vminpd_25
 define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vmaxpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -337,7 +337,7 @@ define <4 x double> @test_mask_vmaxpd_25
 define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vsubpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -350,7 +350,7 @@ define <4 x double> @test_mask_vsubpd_25
 define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vdivpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -363,7 +363,7 @@ define <4 x double> @test_mask_vdivpd_25
 define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_mask_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
 ; CHECK-NEXT:    vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -376,7 +376,7 @@ define <4 x double> @test_mask_vaddpd_25
 define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
 ; CHECK-LABEL: test_maskz_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
 ; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -389,7 +389,7 @@ define <4 x double> @test_maskz_vaddpd_2
 define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j,  <4 x i64> %mask1) nounwind {
 ; CHECK-LABEL: test_mask_fold_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
 ; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
 ; CHECK-NEXT:    vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -403,7 +403,7 @@ define <4 x double> @test_mask_fold_vadd
 define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
 ; CHECK-LABEL: test_maskz_fold_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -429,7 +429,7 @@ define <4 x double> @test_broadcast2_vad
 define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
 ; CHECK-LABEL: test_mask_broadcast_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm0, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc0]
+; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
 ; CHECK-NEXT:    vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04]
 ; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
@@ -446,7 +446,7 @@ define <4 x double> @test_mask_broadcast
 define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
 ; CHECK-LABEL: test_maskz_broadcast_vaddpd_256:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Thu Aug  3 01:50:18 2017
@@ -3950,7 +3950,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
 ; CHECK-NEXT:    vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x54,0xda,0x04]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vfixupimmpd $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xe2,0x05]
 ; CHECK-NEXT:    vaddpd %ymm4, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdc]
 ; CHECK-NEXT:    vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
@@ -3972,7 +3972,7 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
 ; CHECK-NEXT:    vfixupimmpd $5, %ymm2, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xda,0x05]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
 ; CHECK-NEXT:    vmovapd %ymm0, %ymm5 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xe8]
 ; CHECK-NEXT:    vfixupimmpd $4, %ymm4, %ymm1, %ymm5 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xec,0x04]
 ; CHECK-NEXT:    vaddpd %ymm5, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdd]
@@ -4043,7 +4043,7 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
 ; CHECK-NEXT:    vmovaps %ymm0, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xe0]
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm4 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x54,0xe2,0x05]
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x54,0xc2,0x05]
 ; CHECK-NEXT:    vaddps %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdc,0x58,0xc0]
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
@@ -4066,7 +4066,7 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
 ; CHECK-NEXT:    vmovaps %ymm0, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xe0]
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x54,0xe2,0x05]
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x54,0xc2,0x05]
 ; CHECK-NEXT:    vaddps %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdc,0x58,0xc0]
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll Thu Aug  3 01:50:18 2017
@@ -164,7 +164,7 @@ define <8 x float> @test_256_16(i8 * %ad
 define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_17:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -178,7 +178,7 @@ define <8 x i32> @test_256_17(i8 * %addr
 define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_18:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -192,7 +192,7 @@ define <8 x i32> @test_256_18(i8 * %addr
 define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_19:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -206,7 +206,7 @@ define <8 x i32> @test_256_19(i8 * %addr
 define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_20:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -220,7 +220,7 @@ define <8 x i32> @test_256_20(i8 * %addr
 define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_21:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -234,7 +234,7 @@ define <4 x i64> @test_256_21(i8 * %addr
 define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_22:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -248,7 +248,7 @@ define <4 x i64> @test_256_22(i8 * %addr
 define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_23:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -262,7 +262,7 @@ define <4 x i64> @test_256_23(i8 * %addr
 define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_24:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xa9,0x6f,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -276,7 +276,7 @@ define <4 x i64> @test_256_24(i8 * %addr
 define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_25:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vcmpordps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqps %ymm2, %ymm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x28,0x07]
@@ -291,7 +291,7 @@ define <8 x float> @test_256_25(i8 * %ad
 define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_26:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vcmpordps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x07]
 ; CHECK-NEXT:    vcmpneqps %ymm2, %ymm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0xc2,0xca,0x04]
 ; CHECK-NEXT:    vmovups (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x10,0x07]
@@ -306,7 +306,7 @@ define <8 x float> @test_256_26(i8 * %ad
 define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_27:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vcmpordps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqps %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x28,0x07]
@@ -321,7 +321,7 @@ define <8 x float> @test_256_27(i8 * %ad
 define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_28:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vcmpordps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x07]
 ; CHECK-NEXT:    vcmpneqps %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0xc2,0xc9,0x04]
 ; CHECK-NEXT:    vmovups (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x10,0x07]
@@ -336,7 +336,7 @@ define <8 x float> @test_256_28(i8 * %ad
 define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_29:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovapd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -350,7 +350,7 @@ define <4 x double> @test_256_29(i8 * %a
 define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_30:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovupd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -364,7 +364,7 @@ define <4 x double> @test_256_30(i8 * %a
 define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_31:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovapd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -378,7 +378,7 @@ define <4 x double> @test_256_31(i8 * %a
 define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_32:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    vpxor %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xef,0xc9]
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovupd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
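
All of the hunks above make the same substitution: the zero idiom now XORs the low 128-bit XMM subregister instead of the full YMM register. This is safe because any VEX- or EVEX-encoded instruction that writes an XMM register also clears bits 128 and up of the containing YMM/ZMM register, so the 128-bit VPXOR still yields a fully zeroed wide register. As a minimal sketch (a hypothetical reproducer, not one of the modified tests), a function such as

    ; zero256.ll -- illustrative only, not from the test suite
    define <4 x i64> @zero256() {
      ret <4 x i64> zeroinitializer
    }

compiled with something like "llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl zero256.ll -o -" should now select "vpxor %xmm0, %xmm0, %xmm0" where the 256-bit "vpxor %ymm0, %ymm0, %ymm0" was emitted before.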

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-vbroadcast.ll Thu Aug  3 01:50:18 2017
@@ -73,7 +73,7 @@ define   <8 x float> @_inreg8xfloat(floa
 define   <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -87,7 +87,7 @@ define   <8 x float> @_ss8xfloat_mask(<8
 define   <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
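
The NoVLX hunks in the next file apply the same idea to 512-bit zeroing: "vpxord %zmm, %zmm, %zmm" carries the fixed 4-byte EVEX prefix (6 bytes in total), while the VEX-encoded 128-bit form is 4 bytes and still zeroes the entire ZMM register. A rough comparison, assuming a register number below 16 so the VEX encoding is legal (the two 4-byte encodings are the ones printed in the CHECK lines above; the EVEX length follows from its fixed 4-byte prefix plus opcode and ModRM):

    vpxord %zmm2, %zmm2, %zmm2   # EVEX: 4-byte prefix + opcode + ModRM = 6 bytes
    vpxor  %ymm2, %ymm2, %ymm2   # VEX:  c5 ed ef d2 = 4 bytes
    vpxor  %xmm2, %xmm2, %xmm2   # VEX:  c5 e9 ef d2 = 4 bytes, also zeroes ymm2/zmm2

A few hunks below end up with vxorps rather than vpxor for the 128-bit zero; both are recognized zero idioms, and which mnemonic is printed depends on the execution-domain fixup applied to the surrounding code.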

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll Thu Aug  3 01:50:18 2017
@@ -40,7 +40,7 @@ define zeroext i32 @test_vpcmpeqb_v16i1_
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -166,7 +166,7 @@ define zeroext i32 @test_vpcmpeqb_v16i1_
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -295,7 +295,7 @@ define zeroext i32 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -425,7 +425,7 @@ define zeroext i32 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -555,7 +555,7 @@ define zeroext i64 @test_vpcmpeqb_v16i1_
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -686,7 +686,7 @@ define zeroext i64 @test_vpcmpeqb_v16i1_
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -820,7 +820,7 @@ define zeroext i64 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -955,7 +955,7 @@ define zeroext i64 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -1083,7 +1083,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -1133,7 +1133,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -1181,7 +1181,7 @@ define zeroext i64 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
 ; NoVLX-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm3, %xmm3
-; NoVLX-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; NoVLX-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; NoVLX-NEXT:    vpand %xmm3, %xmm1, %xmm1
@@ -1243,7 +1243,7 @@ define zeroext i64 @test_masked_vpcmpeqb
 ; NoVLX-NEXT:    vpmovdb %zmm1, %xmm1
 ; NoVLX-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; NoVLX-NEXT:    vpand %xmm2, %xmm4, %xmm2
@@ -1425,7 +1425,7 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -1501,7 +1501,7 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -1580,7 +1580,7 @@ define zeroext i32 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -1660,7 +1660,7 @@ define zeroext i32 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -1740,7 +1740,7 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -1821,7 +1821,7 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -1905,7 +1905,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -1990,7 +1990,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -2091,7 +2091,7 @@ define zeroext i32 @test_vpcmpeqw_v16i1_
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -2218,7 +2218,7 @@ define zeroext i32 @test_vpcmpeqw_v16i1_
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -2348,7 +2348,7 @@ define zeroext i32 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -2479,7 +2479,7 @@ define zeroext i32 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -2610,7 +2610,7 @@ define zeroext i64 @test_vpcmpeqw_v16i1_
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -2742,7 +2742,7 @@ define zeroext i64 @test_vpcmpeqw_v16i1_
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -2877,7 +2877,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -3013,7 +3013,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -3443,7 +3443,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -3710,7 +3710,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -3926,7 +3926,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    movq %rax, %rcx
 ; NoVLX-NEXT:    shrq $48, %rax
 ; NoVLX-NEXT:    shrq $32, %rcx
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm2
 ; NoVLX-NEXT:    vinserti128 $1, %xmm5, %ymm2, %ymm2
@@ -4205,7 +4205,7 @@ define zeroext i64 @test_masked_vpcmpeqw
 ; NoVLX-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm4
 ; NoVLX-NEXT:    vpmovdb %zmm0, %xmm2
 ; NoVLX-NEXT:    shrq $48, %rax
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
 ; NoVLX-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
 ; NoVLX-NEXT:    vpcmpeqw (%rsi), %ymm3, %ymm3
@@ -5535,7 +5535,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -5585,7 +5585,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqd (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -5655,7 +5655,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -5727,7 +5727,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -5783,7 +5783,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -5855,7 +5855,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -6102,7 +6102,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6178,7 +6178,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6258,7 +6258,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6339,7 +6339,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6419,7 +6419,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6500,7 +6500,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -6581,7 +6581,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -6662,7 +6662,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -6747,7 +6747,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -6833,7 +6833,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -6918,7 +6918,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -7004,7 +7004,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -7103,7 +7103,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi255:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7227,7 +7227,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi263:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7354,7 +7354,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7482,7 +7482,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7610,7 +7610,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi287:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7738,7 +7738,7 @@ define zeroext i32 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -7867,7 +7867,7 @@ define zeroext i64 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi303:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -7996,7 +7996,7 @@ define zeroext i64 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi311:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -8128,7 +8128,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -8261,7 +8261,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -8394,7 +8394,7 @@ define zeroext i64 @test_vpcmpeqd_v16i1_
 ; NoVLX-NEXT:  .Lcfi335:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpeqd (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -8527,7 +8527,7 @@ define zeroext i64 @test_masked_vpcmpeqd
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -9735,7 +9735,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -9785,7 +9785,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -9847,7 +9847,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -9911,7 +9911,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -9967,7 +9967,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -10031,7 +10031,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11265,7 +11265,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11317,7 +11317,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11389,7 +11389,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11463,7 +11463,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11521,7 +11521,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11595,7 +11595,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -11816,7 +11816,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -11890,7 +11890,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -11967,7 +11967,7 @@ define zeroext i32 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -12045,7 +12045,7 @@ define zeroext i32 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -12123,7 +12123,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -12201,7 +12201,7 @@ define zeroext i32 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -12280,7 +12280,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12359,7 +12359,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12441,7 +12441,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12524,7 +12524,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12607,7 +12607,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpeqq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12690,7 +12690,7 @@ define zeroext i64 @test_masked_vpcmpeqq
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -12791,7 +12791,7 @@ define zeroext i32 @test_vpcmpsgtb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -12917,7 +12917,7 @@ define zeroext i32 @test_vpcmpsgtb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -13046,7 +13046,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -13176,7 +13176,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -13306,7 +13306,7 @@ define zeroext i64 @test_vpcmpsgtb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -13437,7 +13437,7 @@ define zeroext i64 @test_vpcmpsgtb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -13571,7 +13571,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -13706,7 +13706,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -13834,7 +13834,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -13884,7 +13884,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -13932,7 +13932,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
 ; NoVLX-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm3, %xmm3
-; NoVLX-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; NoVLX-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; NoVLX-NEXT:    vpand %xmm3, %xmm1, %xmm1
@@ -13994,7 +13994,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpmovdb %zmm1, %xmm1
 ; NoVLX-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vpcmpgtb (%rsi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; NoVLX-NEXT:    vpand %xmm2, %xmm4, %xmm2
@@ -14176,7 +14176,7 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -14252,7 +14252,7 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -14331,7 +14331,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -14411,7 +14411,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -14491,7 +14491,7 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -14572,7 +14572,7 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -14656,7 +14656,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -14741,7 +14741,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -14842,7 +14842,7 @@ define zeroext i32 @test_vpcmpsgtw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -14969,7 +14969,7 @@ define zeroext i32 @test_vpcmpsgtw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -15099,7 +15099,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -15230,7 +15230,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -15361,7 +15361,7 @@ define zeroext i64 @test_vpcmpsgtw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -15493,7 +15493,7 @@ define zeroext i64 @test_vpcmpsgtw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -15628,7 +15628,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -15764,7 +15764,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -16194,7 +16194,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -16461,7 +16461,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -16677,7 +16677,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    movq %rax, %rcx
 ; NoVLX-NEXT:    shrq $48, %rax
 ; NoVLX-NEXT:    shrq $32, %rcx
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm2
 ; NoVLX-NEXT:    vinserti128 $1, %xmm5, %ymm2, %ymm2
@@ -16956,7 +16956,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm4
 ; NoVLX-NEXT:    vpmovdb %zmm0, %xmm2
 ; NoVLX-NEXT:    shrq $48, %rax
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
 ; NoVLX-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
 ; NoVLX-NEXT:    vpcmpgtw (%rsi), %ymm3, %ymm3
@@ -18286,7 +18286,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18336,7 +18336,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtd (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18406,7 +18406,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18478,7 +18478,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18534,7 +18534,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18606,7 +18606,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -18853,7 +18853,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -18929,7 +18929,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -19009,7 +19009,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -19090,7 +19090,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -19170,7 +19170,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -19251,7 +19251,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -19332,7 +19332,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19413,7 +19413,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19498,7 +19498,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19584,7 +19584,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19669,7 +19669,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19755,7 +19755,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -19854,7 +19854,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi707:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -19978,7 +19978,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi715:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -20105,7 +20105,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -20233,7 +20233,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -20361,7 +20361,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi739:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -20489,7 +20489,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -20618,7 +20618,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi755:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -20747,7 +20747,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi763:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -20879,7 +20879,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -21012,7 +21012,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -21145,7 +21145,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1
 ; NoVLX-NEXT:  .Lcfi787:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpgtd (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -21278,7 +21278,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -22486,7 +22486,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -22536,7 +22536,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -22598,7 +22598,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -22662,7 +22662,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -22718,7 +22718,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -22782,7 +22782,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24016,7 +24016,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24068,7 +24068,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24140,7 +24140,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24214,7 +24214,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24272,7 +24272,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24346,7 +24346,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -24567,7 +24567,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -24641,7 +24641,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -24718,7 +24718,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -24796,7 +24796,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -24874,7 +24874,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -24952,7 +24952,7 @@ define zeroext i32 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -25031,7 +25031,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25110,7 +25110,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25192,7 +25192,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25275,7 +25275,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25358,7 +25358,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpgtq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25441,7 +25441,7 @@ define zeroext i64 @test_masked_vpcmpsgt
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -25544,7 +25544,7 @@ define zeroext i32 @test_vpcmpsgeb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -25673,7 +25673,7 @@ define zeroext i32 @test_vpcmpsgeb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -25804,7 +25804,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -25937,7 +25937,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -26069,7 +26069,7 @@ define zeroext i64 @test_vpcmpsgeb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -26203,7 +26203,7 @@ define zeroext i64 @test_vpcmpsgeb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -26339,7 +26339,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -26477,7 +26477,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -26607,7 +26607,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -26660,7 +26660,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -26708,7 +26708,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
 ; NoVLX-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm3, %xmm3
-; NoVLX-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; NoVLX-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpxor %ymm1, %ymm0, %ymm0
@@ -26772,7 +26772,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpmovdb %zmm1, %xmm1
 ; NoVLX-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vmovdqa (%rsi), %ymm4
 ; NoVLX-NEXT:    vpcmpgtb %ymm0, %ymm4, %ymm0
 ; NoVLX-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
@@ -26969,7 +26969,7 @@ define zeroext i32 @test_vpcmpsgew_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -27048,7 +27048,7 @@ define zeroext i32 @test_vpcmpsgew_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -27129,7 +27129,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -27212,7 +27212,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -27294,7 +27294,7 @@ define zeroext i64 @test_vpcmpsgew_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -27378,7 +27378,7 @@ define zeroext i64 @test_vpcmpsgew_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -27464,7 +27464,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -27552,7 +27552,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -27655,7 +27655,7 @@ define zeroext i32 @test_vpcmpsgew_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -27785,7 +27785,7 @@ define zeroext i32 @test_vpcmpsgew_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -27917,7 +27917,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -28051,7 +28051,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -28184,7 +28184,7 @@ define zeroext i64 @test_vpcmpsgew_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -28319,7 +28319,7 @@ define zeroext i64 @test_vpcmpsgew_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -28456,7 +28456,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -28595,7 +28595,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -29028,7 +29028,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -29300,7 +29300,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -29516,7 +29516,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    movq %rax, %rcx
 ; NoVLX-NEXT:    shrq $48, %rax
 ; NoVLX-NEXT:    shrq $32, %rcx
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm2
 ; NoVLX-NEXT:    vinserti128 $1, %xmm5, %ymm2, %ymm2
@@ -29798,7 +29798,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm4
 ; NoVLX-NEXT:    vpmovdb %zmm0, %xmm2
 ; NoVLX-NEXT:    shrq $48, %rax
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
 ; NoVLX-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
 ; NoVLX-NEXT:    vmovdqa (%rsi), %ymm5
@@ -31165,7 +31165,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31218,7 +31218,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31288,7 +31288,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31361,7 +31361,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31420,7 +31420,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31493,7 +31493,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -31742,7 +31742,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -31818,7 +31818,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -31898,7 +31898,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -31979,7 +31979,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -32060,7 +32060,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -32142,7 +32142,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -32223,7 +32223,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32304,7 +32304,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32389,7 +32389,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32475,7 +32475,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32561,7 +32561,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32648,7 +32648,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -32747,7 +32747,7 @@ define zeroext i32 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:  .Lcfi1159:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -32871,7 +32871,7 @@ define zeroext i32 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:  .Lcfi1167:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpnltd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -32998,7 +32998,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -33126,7 +33126,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -33256,7 +33256,7 @@ define zeroext i32 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %zmm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -33386,7 +33386,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpbroadcastd (%rsi), %zmm1
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -33515,7 +33515,7 @@ define zeroext i64 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:  .Lcfi1207:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -33644,7 +33644,7 @@ define zeroext i64 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:  .Lcfi1215:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpnltd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -33776,7 +33776,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -33909,7 +33909,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -34044,7 +34044,7 @@ define zeroext i64 @test_vpcmpsged_v16i1
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %zmm1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -34179,7 +34179,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpbroadcastd (%rsi), %zmm1
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -35429,7 +35429,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -35482,7 +35482,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -35544,7 +35544,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -35609,7 +35609,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -35668,7 +35668,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -35733,7 +35733,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37017,7 +37017,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_
 ; NoVLX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37072,7 +37072,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_
 ; NoVLX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37146,7 +37146,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37223,7 +37223,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37284,7 +37284,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_
 ; NoVLX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37361,7 +37361,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -37586,7 +37586,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -37660,7 +37660,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpnltq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -37737,7 +37737,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -37815,7 +37815,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -37895,7 +37895,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %zmm1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -37975,7 +37975,7 @@ define zeroext i32 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpbroadcastq (%rsi), %zmm1
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -38054,7 +38054,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38133,7 +38133,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpnltq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38215,7 +38215,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38298,7 +38298,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38383,7 +38383,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpbroadcastq (%rdi), %zmm1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38468,7 +38468,7 @@ define zeroext i64 @test_masked_vpcmpsge
 ; NoVLX-NEXT:    vpbroadcastq (%rsi), %zmm1
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -38572,7 +38572,7 @@ define zeroext i32 @test_vpcmpultb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -38701,7 +38701,7 @@ define zeroext i32 @test_vpcmpultb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -38833,7 +38833,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -38966,7 +38966,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -39099,7 +39099,7 @@ define zeroext i64 @test_vpcmpultb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -39233,7 +39233,7 @@ define zeroext i64 @test_vpcmpultb_v16i1
 ; NoVLX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -39370,7 +39370,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -39508,7 +39508,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -39639,7 +39639,7 @@ define zeroext i64 @test_vpcmpultb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -39692,7 +39692,7 @@ define zeroext i64 @test_vpcmpultb_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -39740,7 +39740,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
 ; NoVLX-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm3, %xmm3
-; NoVLX-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; NoVLX-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; NoVLX-NEXT:    vpxor %ymm5, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpxor %ymm5, %ymm1, %ymm1
@@ -39805,7 +39805,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpmovdb %zmm1, %xmm1
 ; NoVLX-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm2, %xmm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; NoVLX-NEXT:    vpxor %ymm4, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpxor (%rsi), %ymm4, %ymm4
@@ -40005,7 +40005,7 @@ define zeroext i32 @test_vpcmpultw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -40084,7 +40084,7 @@ define zeroext i32 @test_vpcmpultw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -40166,7 +40166,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -40249,7 +40249,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -40332,7 +40332,7 @@ define zeroext i64 @test_vpcmpultw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -40416,7 +40416,7 @@ define zeroext i64 @test_vpcmpultw_v8i1_
 ; NoVLX-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -40503,7 +40503,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -40591,7 +40591,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmq %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -40695,7 +40695,7 @@ define zeroext i32 @test_vpcmpultw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -40825,7 +40825,7 @@ define zeroext i32 @test_vpcmpultw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -40958,7 +40958,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -41092,7 +41092,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -41226,7 +41226,7 @@ define zeroext i64 @test_vpcmpultw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -41361,7 +41361,7 @@ define zeroext i64 @test_vpcmpultw_v16i1
 ; NoVLX-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -41499,7 +41499,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -41638,7 +41638,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -42073,7 +42073,7 @@ define zeroext i64 @test_vpcmpultw_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -42345,7 +42345,7 @@ define zeroext i64 @test_vpcmpultw_v32i1
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, (%rsp)
 ; NoVLX-NEXT:    movl (%rsp), %ecx
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -42560,7 +42560,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    movq %rax, %rcx
 ; NoVLX-NEXT:    shrq $48, %rax
 ; NoVLX-NEXT:    shrq $32, %rcx
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpinsrw $6, %ecx, %xmm3, %xmm3
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm3
 ; NoVLX-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
@@ -42844,7 +42844,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
 ; NoVLX-NEXT:    vpmovdb %zmm0, %xmm2
 ; NoVLX-NEXT:    shrq $48, %rax
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm4
 ; NoVLX-NEXT:    vpinsrw $7, %eax, %xmm7, %xmm3
 ; NoVLX-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm3
@@ -44237,7 +44237,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44290,7 +44290,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor (%rdi), %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44363,7 +44363,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44438,7 +44438,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44497,7 +44497,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44572,7 +44572,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -44819,7 +44819,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -44895,7 +44895,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -44975,7 +44975,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -45056,7 +45056,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -45136,7 +45136,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -45217,7 +45217,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -45298,7 +45298,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45379,7 +45379,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45464,7 +45464,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45550,7 +45550,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45635,7 +45635,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45721,7 +45721,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k0, %k1, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -45820,7 +45820,7 @@ define zeroext i32 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1611:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -45944,7 +45944,7 @@ define zeroext i32 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1619:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -46071,7 +46071,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -46199,7 +46199,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -46327,7 +46327,7 @@ define zeroext i32 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1643:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -46455,7 +46455,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -46584,7 +46584,7 @@ define zeroext i64 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1659:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -46713,7 +46713,7 @@ define zeroext i64 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1667:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -46845,7 +46845,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -46978,7 +46978,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -47111,7 +47111,7 @@ define zeroext i64 @test_vpcmpultd_v16i1
 ; NoVLX-NEXT:  .Lcfi1691:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vpcmpltud (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -47244,7 +47244,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -48527,7 +48527,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -48580,7 +48580,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_
 ; NoVLX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor (%rdi), %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -48645,7 +48645,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -48712,7 +48712,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -48771,7 +48771,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -48838,7 +48838,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    vmovd %ecx, %xmm1
 ; NoVLX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50129,7 +50129,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_
 ; NoVLX-NEXT:    vpxor %ymm2, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50184,7 +50184,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_
 ; NoVLX-NEXT:    vpxor (%rdi), %ymm1, %ymm1
 ; NoVLX-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50259,7 +50259,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50336,7 +50336,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50397,7 +50397,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_
 ; NoVLX-NEXT:    vpxor %ymm2, %ymm1, %ymm1
 ; NoVLX-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50474,7 +50474,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    kmovw %k1, %eax
 ; NoVLX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
 ; NoVLX-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -50695,7 +50695,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpltuq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -50769,7 +50769,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpltuq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -50846,7 +50846,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -50924,7 +50924,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -51002,7 +51002,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vpcmpltuq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -51080,7 +51080,7 @@ define zeroext i32 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -51159,7 +51159,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpltuq %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51238,7 +51238,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpltuq (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51320,7 +51320,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51403,7 +51403,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51486,7 +51486,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vpcmpltuq (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51569,7 +51569,7 @@ define zeroext i64 @test_masked_vpcmpult
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -52685,7 +52685,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -52735,7 +52735,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -52787,7 +52787,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vbroadcastss (%rdi), %xmm1
 ; NoVLX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -52845,7 +52845,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm2, %ymm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm3, %zmm3, %k0
@@ -52905,7 +52905,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm1, %ymm1
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vcmpeqps (%rsi), %xmm0, %xmm0
 ; NoVLX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm2, %zmm2, %k0
@@ -52966,7 +52966,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm1, %ymm1
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vbroadcastss (%rsi), %xmm3
 ; NoVLX-NEXT:    vcmpeqps %xmm3, %xmm0, %xmm0
 ; NoVLX-NEXT:    vandps %xmm1, %xmm0, %xmm0
@@ -53216,7 +53216,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53240,7 +53240,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53292,7 +53292,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovaps (%rdi), %ymm1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53316,7 +53316,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53369,7 +53369,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vbroadcastss (%rdi), %ymm1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53393,7 +53393,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53450,7 +53450,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53474,7 +53474,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53531,7 +53531,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53555,7 +53555,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53613,7 +53613,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -53637,7 +53637,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53695,7 +53695,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -53721,7 +53721,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53776,7 +53776,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vmovaps (%rdi), %ymm1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -53802,7 +53802,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53858,7 +53858,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; NoVLX-NEXT:    vbroadcastss (%rdi), %ymm1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -53884,7 +53884,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -53944,7 +53944,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -53970,7 +53970,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -54030,7 +54030,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -54056,7 +54056,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -54117,7 +54117,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    kandw %k1, %k0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -54143,7 +54143,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -54217,7 +54217,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1887:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -54341,7 +54341,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1895:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -54466,7 +54466,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1903:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -54594,7 +54594,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -54722,7 +54722,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -54851,7 +54851,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $14, %k0, %k1
@@ -55027,7 +55027,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1935:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -55156,7 +55156,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1943:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -55286,7 +55286,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1
 ; NoVLX-NEXT:  .Lcfi1951:
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    vcmpeqps (%rdi){1to16}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -55419,7 +55419,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -55552,7 +55552,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -55686,7 +55686,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    .cfi_offset %r15, -24
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqps (%rsi){1to16}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -56886,7 +56886,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -56936,7 +56936,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi), %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -56988,7 +56988,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
 ; NoVLX-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -57045,7 +57045,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vandpd %xmm2, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm3, %zmm3, %k0
@@ -57104,7 +57104,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vcmpeqpd (%rsi), %xmm0, %xmm0
 ; NoVLX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm2, %zmm2, %k0
@@ -57164,7 +57164,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
 ; NoVLX-NEXT:    vcmpeqpd %xmm3, %xmm0, %xmm0
 ; NoVLX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
@@ -58293,7 +58293,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -58345,7 +58345,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -58399,7 +58399,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_
 ; NoVLX-NEXT:    vbroadcastsd (%rdi), %ymm1
 ; NoVLX-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
-; NoVLX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; NoVLX-NEXT:    vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k0, {{[0-9]+}}(%rsp)
@@ -58458,7 +58458,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm2, %ymm2
-; NoVLX-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; NoVLX-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
 ; NoVLX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -58520,7 +58520,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm1, %ymm1
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vcmpeqpd (%rsi), %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
 ; NoVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -58583,7 +58583,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kmovw %eax, %k1
 ; NoVLX-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
 ; NoVLX-NEXT:    vpmovqd %zmm1, %ymm1
-; NoVLX-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NoVLX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; NoVLX-NEXT:    vbroadcastsd (%rsi), %ymm3
 ; NoVLX-NEXT:    vcmpeqpd %ymm3, %ymm0, %ymm0
 ; NoVLX-NEXT:    vpmovqd %zmm0, %ymm0
@@ -58862,7 +58862,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vcmpeqpd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -58886,7 +58886,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -58936,7 +58936,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -58960,7 +58960,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59011,7 +59011,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -59035,7 +59035,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59089,7 +59089,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -59113,7 +59113,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59167,7 +59167,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -59191,7 +59191,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59246,7 +59246,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $32, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kshiftlw $15, %k0, %k1
@@ -59270,7 +59270,7 @@ define zeroext i32 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59374,7 +59374,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd %zmm1, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59400,7 +59400,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59453,7 +59453,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi), %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59479,7 +59479,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59533,7 +59533,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    andq $-32, %rsp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    vcmpeqpd (%rdi){1to8}, %zmm0, %k0
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59559,7 +59559,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59616,7 +59616,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59642,7 +59642,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59699,7 +59699,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59725,7 +59725,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
@@ -59783,7 +59783,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    subq $64, %rsp
 ; NoVLX-NEXT:    kmovw %edi, %k1
 ; NoVLX-NEXT:    vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
-; NoVLX-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
 ; NoVLX-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
@@ -59809,7 +59809,7 @@ define zeroext i64 @test_masked_vcmpoeqp
 ; NoVLX-NEXT:    kshiftlw $9, %k0, %k1
 ; NoVLX-NEXT:    kshiftrw $15, %k1, %k1
 ; NoVLX-NEXT:    kmovw %k1, %ecx
-; NoVLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $0, %r8d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $1, %r9d, %xmm0, %xmm0
 ; NoVLX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
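
A note for readers of the diff: on AVX-capable hardware, a VEX- or
EVEX-encoded instruction that writes an XMM register zeroes the destination
all the way up to VLMAX, so the 4-byte VEX "vpxor %xmm0, %xmm0, %xmm0"
clears the full 512-bit ZMM0 exactly as the 6-byte EVEX
"vpxord %zmm0, %zmm0, %zmm0" did. A minimal sketch of the pattern these
tests now check (hypothetical function, not part of this commit):

  define <16 x i32> @zero_v16i32() {
  ; CHECK-LABEL: zero_v16i32:
  ; CHECK:       # BB#0:
  ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
  ; CHECK-NEXT:    retq
    ret <16 x i32> zeroinitializer
  }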

Modified: llvm/trunk/test/CodeGen/X86/compress_expand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compress_expand.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compress_expand.ll Thu Aug  3 01:50:18 2017
@@ -204,7 +204,7 @@ define void @test10(i64* %base, <4 x i64
 ; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -227,7 +227,7 @@ define void @test11(i64* %base, <2 x i64
 ; KNL-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
 ; KNL-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; KNL-NEXT:    vpsraq $63, %zmm1, %zmm1
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vinserti32x4 $0, %xmm1, %zmm2, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -250,7 +250,7 @@ define void @test12(float* %base, <4 x f
 ; KNL-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
 ; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT:    vpsrad $31, %xmm1, %xmm1
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vinserti32x4 $0, %xmm1, %zmm2, %zmm1
 ; KNL-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -276,7 +276,7 @@ define <2 x float> @test13(float* %base,
 ; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; KNL-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm1
 ; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vinserti32x4 $0, %xmm1, %zmm2, %zmm1
 ; KNL-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -304,7 +304,7 @@ define void @test14(float* %base, <2 x f
 ; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; KNL-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm1
 ; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vinserti32x4 $0, %xmm1, %zmm2, %zmm1
 ; KNL-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -318,7 +318,7 @@ define void @test14(float* %base, <2 x f
 define <32 x float> @test15(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
 ; ALL-LABEL: test15:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; ALL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; ALL-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; ALL-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
 ; ALL-NEXT:    kmovw %k2, %eax
@@ -335,7 +335,7 @@ define <16 x double> @test16(double* %ba
 ; SKX-LABEL: test16:
 ; SKX:       # BB#0:
 ; SKX-NEXT:    vextracti32x8 $1, %zmm2, %ymm3
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpeqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vpcmpeqd %ymm4, %ymm2, %k2
 ; SKX-NEXT:    kmovb %k2, %eax
@@ -364,7 +364,7 @@ define <16 x double> @test16(double* %ba
 define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
 ; SKX-LABEL: test17:
 ; SKX:       # BB#0:
-; SKX-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; SKX-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
 ; SKX-NEXT:    kmovw %k2, %eax
@@ -376,7 +376,7 @@ define void @test17(float* %base, <32 x
 ;
 ; KNL-LABEL: test17:
 ; KNL:       # BB#0:
-; KNL-NEXT:    vpxord %zmm4, %zmm4, %zmm4
+; KNL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; KNL-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; KNL-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
 ; KNL-NEXT:    kmovw %k2, %eax

Modified: llvm/trunk/test/CodeGen/X86/fma_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns.ll Thu Aug  3 01:50:18 2017
@@ -1529,7 +1529,7 @@ define <4 x double> @test_v4f64_fneg_fmu
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <4 x double> %x, %y

Modified: llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll Thu Aug  3 01:50:18 2017
@@ -1105,7 +1105,7 @@ define <16 x float> @test_v16f32_fneg_fm
 ;
 ; AVX512-LABEL: test_v16f32_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorps %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <16 x float> %x, %y
@@ -1130,7 +1130,7 @@ define <8 x double> @test_v8f64_fneg_fmu
 ;
 ; AVX512-LABEL: test_v8f64_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <8 x double> %x, %y
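
The flavor of zeroing idiom varies across these hunks (vpxor, vxorps,
vxorpd) because LLVM's execution-domain fix pass is free to pick whichever
xor matches the domain of the surrounding instructions; all three forms are
architecturally equivalent ways to zero the register:

  vpxor  %xmm2, %xmm2, %xmm2   # integer domain
  vxorps %xmm2, %xmm2, %xmm2   # single-precision FP domain
  vxorpd %xmm2, %xmm2, %xmm2   # double-precision FP domain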

Modified: llvm/trunk/test/CodeGen/X86/madd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/madd.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/madd.ll (original)
+++ llvm/trunk/test/CodeGen/X86/madd.ll Thu Aug  3 01:50:18 2017
@@ -308,7 +308,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; AVX512-LABEL: _Z9test_charPcS_i:
 ; AVX512:       # BB#0: # %entry
 ; AVX512-NEXT:    movl %edx, %eax
-; AVX512-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    .p2align 4, 0x90
 ; AVX512-NEXT:  .LBB2_1: # %vector.body

Modified: llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll Thu Aug  3 01:50:18 2017
@@ -839,7 +839,7 @@ define <4 x double> @test16(double* %bas
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -855,7 +855,7 @@ define <4 x double> @test16(double* %bas
 ; KNL_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpmovsxdq %ymm0, %zmm0
@@ -893,7 +893,7 @@ define <2 x double> @test17(double* %bas
 ; KNL_64:       # BB#0:
 ; KNL_64-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_64-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -906,7 +906,7 @@ define <2 x double> @test17(double* %bas
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -999,7 +999,7 @@ define void @test19(<4 x double>%a1, dou
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_64-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -1014,7 +1014,7 @@ define void @test19(<4 x double>%a1, dou
 ; KNL_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -1102,7 +1102,7 @@ define void @test21(<2 x i32>%a1, <2 x i
 ; KNL_64-LABEL: test21:
 ; KNL_64:       # BB#0:
 ; KNL_64-NEXT:    # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti32x4 $0, %xmm2, %zmm3, %zmm2
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    vpsllq $63, %zmm2, %zmm2
@@ -1114,7 +1114,7 @@ define void @test21(<2 x i32>%a1, <2 x i
 ; KNL_32-LABEL: test21:
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti32x4 $0, %xmm2, %zmm3, %zmm2
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_32-NEXT:    vpsllq $63, %zmm2, %zmm2
@@ -1272,7 +1272,7 @@ define <2 x i32> @test23(i32* %base, <2
 ; KNL_64:       # BB#0:
 ; KNL_64-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_64-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -1285,7 +1285,7 @@ define <2 x i32> @test23(i32* %base, <2
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -1334,7 +1334,7 @@ define <2 x i32> @test24(i32* %base, <2
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; KNL_32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL_32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; KNL_32-NEXT:    vinserti32x4 $0, {{\.LCPI.*}}, %zmm1, %zmm1
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_32-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -1368,7 +1368,7 @@ define <2 x i64> @test25(i64* %base, <2
 ; KNL_64:       # BB#0:
 ; KNL_64-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_64-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_64-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_64-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -1381,7 +1381,7 @@ define <2 x i64> @test25(i64* %base, <2
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
 ; KNL_32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL_32-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; KNL_32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL_32-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -1430,7 +1430,7 @@ define <2 x i64> @test26(i64* %base, <2
 ; KNL_32-NEXT:    # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
 ; KNL_32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; KNL_32-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL_32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL_32-NEXT:    vinserti32x4 $0, {{\.LCPI.*}}, %zmm2, %zmm2
 ; KNL_32-NEXT:    vpsllq $63, %zmm2, %zmm2
 ; KNL_32-NEXT:    vptestmq %zmm2, %zmm2, %k1
@@ -1522,7 +1522,7 @@ define void @test28(<2 x i32>%a1, <2 x i
 ; KNL_32:       # BB#0:
 ; KNL_32-NEXT:    # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; KNL_32-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL_32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL_32-NEXT:    vinserti32x4 $0, {{\.LCPI.*}}, %zmm2, %zmm2
 ; KNL_32-NEXT:    vpsllq $63, %zmm2, %zmm2
 ; KNL_32-NEXT:    vptestmq %zmm2, %zmm2, %k1
@@ -2156,7 +2156,7 @@ define <4 x i64> @test_pr28312(<4 x i64*
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_64-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL_64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL_64-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
 ; KNL_64-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL_64-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -2181,7 +2181,7 @@ define <4 x i64> @test_pr28312(<4 x i64*
 ; KNL_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpmovsxdq %xmm1, %ymm1
-; KNL_32-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; KNL_32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL_32-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
 ; KNL_32-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; KNL_32-NEXT:    vpsllq $63, %zmm1, %zmm1

Modified: llvm/trunk/test/CodeGen/X86/masked_memop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_memop.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_memop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_memop.ll Thu Aug  3 01:50:18 2017
@@ -248,7 +248,7 @@ define <8 x float> @test11a(<8 x i32> %t
 ;
 ; SKX-LABEL: test11a:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vblendmps (%rdi), %ymm1, %ymm0 {%k1}
 ; SKX-NEXT:    retq
@@ -420,7 +420,7 @@ define void @test12(<8 x i32> %trigger,
 ;
 ; SKX-LABEL: test12:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovdqu32 %ymm1, (%rdi) {%k1}
 ; SKX-NEXT:    vzeroupper

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll Thu Aug  3 01:50:18 2017
@@ -351,7 +351,7 @@ define <16 x float> @merge_16f32_f32_0uu
 ; ALL-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vmovups (%rdi), %zmm1
-; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; ALL-NEXT:    vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
 ; ALL-NEXT:    vpermi2ps %zmm2, %zmm1, %zmm0
 ; ALL-NEXT:    retq
@@ -360,7 +360,7 @@ define <16 x float> @merge_16f32_f32_0uu
 ; X32-AVX512F:       # BB#0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups (%eax), %zmm1
-; X32-AVX512F-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; X32-AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; X32-AVX512F-NEXT:    vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
 ; X32-AVX512F-NEXT:    vpermi2ps %zmm2, %zmm1, %zmm0
 ; X32-AVX512F-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/nontemporal-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/nontemporal-2.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/nontemporal-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/nontemporal-2.ll Thu Aug  3 01:50:18 2017
@@ -253,7 +253,7 @@ define void @test_zero_v8f32(<8 x float>
 ;
 ; VLX-LABEL: test_zero_v8f32:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
@@ -278,7 +278,7 @@ define void @test_zero_v8i32(<8 x i32>*
 ;
 ; VLX-LABEL: test_zero_v8i32:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
@@ -303,7 +303,7 @@ define void @test_zero_v4f64(<4 x double
 ;
 ; VLX-LABEL: test_zero_v4f64:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
@@ -328,7 +328,7 @@ define void @test_zero_v4i64(<4 x i64>*
 ;
 ; VLX-LABEL: test_zero_v4i64:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
@@ -353,7 +353,7 @@ define void @test_zero_v16i16(<16 x i16>
 ;
 ; VLX-LABEL: test_zero_v16i16:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
@@ -378,7 +378,7 @@ define void @test_zero_v32i8(<32 x i8>*
 ;
 ; VLX-LABEL: test_zero_v32i8:
 ; VLX:       # BB#0:
-; VLX-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; VLX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; VLX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; VLX-NEXT:    vzeroupper
 ; VLX-NEXT:    retq
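
The 256-bit cases shrink too (vpxor %ymm0 -> vpxor %xmm0) even though both
are 4-byte VEX encodings: writing XMM0 already zeroes bits 128-255 of YMM0,
and the 128-bit xor is the canonical dependency-breaking zero idiom, which
may also be cheaper on cores that split 256-bit ops into two 128-bit halves:

  vpxor %xmm0, %xmm0, %xmm0   # also clears the upper half of %ymm0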

Modified: llvm/trunk/test/CodeGen/X86/nontemporal-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/nontemporal-loads.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/nontemporal-loads.ll (original)
+++ llvm/trunk/test/CodeGen/X86/nontemporal-loads.ll Thu Aug  3 01:50:18 2017
@@ -1900,7 +1900,7 @@ define <16 x i32> @test_masked_v16i32(i8
 ;
 ; AVX512-LABEL: test_masked_v16i32:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; AVX512-NEXT:    vmovntdqa (%rdi), %zmm1
 ; AVX512-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/sad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sad.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sad.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sad.ll Thu Aug  3 01:50:18 2017
@@ -60,7 +60,7 @@ define i32 @sad_16i8() nounwind {
 ;
 ; AVX512F-LABEL: sad_16i8:
 ; AVX512F:       # BB#0: # %entry
-; AVX512F-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    .p2align 4, 0x90
 ; AVX512F-NEXT:  .LBB0_1: # %vector.body
@@ -86,7 +86,7 @@ define i32 @sad_16i8() nounwind {
 ;
 ; AVX512BW-LABEL: sad_16i8:
 ; AVX512BW:       # BB#0: # %entry
-; AVX512BW-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    .p2align 4, 0x90
 ; AVX512BW-NEXT:  .LBB0_1: # %vector.body
@@ -307,9 +307,9 @@ define i32 @sad_32i8() nounwind {
 ;
 ; AVX512F-LABEL: sad_32i8:
 ; AVX512F:       # BB#0: # %entry
-; AVX512F-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    .p2align 4, 0x90
 ; AVX512F-NEXT:  .LBB1_1: # %vector.body
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -335,9 +335,9 @@ define i32 @sad_32i8() nounwind {
 ;
 ; AVX512BW-LABEL: sad_32i8:
 ; AVX512BW:       # BB#0: # %entry
-; AVX512BW-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    .p2align 4, 0x90
 ; AVX512BW-NEXT:  .LBB1_1: # %vector.body
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -760,11 +760,11 @@ define i32 @sad_avx64i8() nounwind {
 ;
 ; AVX512F-LABEL: sad_avx64i8:
 ; AVX512F:       # BB#0: # %entry
-; AVX512F-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; AVX512F-NEXT:    vpxord %zmm2, %zmm2, %zmm2
-; AVX512F-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512F-NEXT:    .p2align 4, 0x90
 ; AVX512F-NEXT:  .LBB2_1: # %vector.body
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -808,9 +808,9 @@ define i32 @sad_avx64i8() nounwind {
 ;
 ; AVX512BW-LABEL: sad_avx64i8:
 ; AVX512BW:       # BB#0: # %entry
-; AVX512BW-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    .p2align 4, 0x90
 ; AVX512BW-NEXT:  .LBB2_1: # %vector.body
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
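
For the 512-bit cases the win is code size: EVEX carries a 4-byte prefix
where VEX needs only two, so every zeroing xor shrinks by two bytes
(encodings below follow the instruction-format rules and should be
double-checked against an assembler):

  62 f1 7d 48 ef c0    vpxord %zmm0, %zmm0, %zmm0   # 6-byte EVEX
  c5 f9 ef c0          vpxor  %xmm0, %xmm0, %xmm0   # 4-byte VEX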

Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll Thu Aug  3 01:50:18 2017
@@ -103,7 +103,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -133,7 +133,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -295,7 +295,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -325,7 +325,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -472,7 +472,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -497,7 +497,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -634,7 +634,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -659,7 +659,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -781,7 +781,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -801,7 +801,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -908,7 +908,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -928,7 +928,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm4, %ymm1
-; AVX512VLBWDQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm5
 ; AVX512VLBWDQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
@@ -1020,7 +1020,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm2, %ymm1
 ; AVX512VL-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
@@ -1035,7 +1035,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm0
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX512VLBWDQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm1
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm2, %ymm1
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
@@ -1124,7 +1124,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vpand %ymm1, %ymm2, %ymm1
 ; AVX512VL-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
@@ -1139,7 +1139,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512VLBWDQ-NEXT:    vpsrlw $4, %ymm0, %ymm0
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX512VLBWDQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VLBWDQ-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm1
 ; AVX512VLBWDQ-NEXT:    vpand %ymm1, %ymm2, %ymm1
 ; AVX512VLBWDQ-NEXT:    vpshufb %ymm0, %ymm3, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll Thu Aug  3 01:50:18 2017
@@ -39,7 +39,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
@@ -118,7 +118,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
@@ -195,7 +195,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -282,7 +282,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]

Modified: llvm/trunk/test/CodeGen/X86/vector-popcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-popcnt-512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-popcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-popcnt-512.ll Thu Aug  3 01:50:18 2017
@@ -38,7 +38,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
@@ -92,7 +92,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpshufb %zmm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpaddb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; AVX512BW-NEXT:    vpsadbw %zmm1, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll Thu Aug  3 01:50:18 2017
@@ -463,7 +463,7 @@ define <64 x i8> @ashr_const7_v64i8(<64
 ;
 ; AVX512BW-LABEL: ashr_const7_v64i8:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpcmpgtb %zmm0, %zmm1, %k0
 ; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
 ; AVX512BW-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll Thu Aug  3 01:50:18 2017
@@ -1706,7 +1706,7 @@ define <16 x i16> @shuffle_v16i16_28_zz_
 ; AVX512VL-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [28,1,2,3,29,5,6,7,30,9,10,11,31,13,14,15]
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpermt2w %ymm0, %ymm2, %ymm1
 ; AVX512VL-NEXT:    vmovdqa %ymm1, %ymm0
 ; AVX512VL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v32.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v32.ll Thu Aug  3 01:50:18 2017
@@ -315,7 +315,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00
 ; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512VL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpbroadcastb %xmm0, %xmm0
 ; AVX512VL-NEXT:    movl $32767, %eax # imm = 0x7FFF
@@ -744,17 +744,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
-; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    retq
+; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
+; AVX2OR512VL:       # BB#0:
+; AVX2OR512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2OR512VL-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX2OR512VL-NEXT:    retq
   %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
   ret <32 x i8> %shuffle
 }
@@ -1150,7 +1144,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512VL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VL-NEXT:    movl $-1431655766, %eax # imm = 0xAAAAAAAA
 ; AVX512VL-NEXT:    kmovd %eax, %k1
 ; AVX512VL-NEXT:    vpshufb %ymm2, %ymm1, %ymm0 {%k1}
@@ -1173,19 +1167,12 @@ define <32 x i8> @shuffle_v32i8_32_32_32
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
-; AVX2-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX512VL-NEXT:    retq
+; AVX2OR512VL-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
+; AVX2OR512VL:       # BB#0:
+; AVX2OR512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2OR512VL-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX2OR512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2OR512VL-NEXT:    retq
   %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <32 x i8> %shuffle
 }

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Thu Aug  3 01:50:18 2017
@@ -535,7 +535,7 @@ define <4 x double> @shuffle_v4f64_0z3z(
 ; AVX512VL-LABEL: shuffle_v4f64_0z3z:
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 0, i32 4, i32 3, i32 4>
@@ -562,7 +562,7 @@ define <4 x double> @shuffle_v4f64_1z2z(
 ;
 ; AVX512VL-LABEL: shuffle_v4f64_1z2z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
 ; AVX512VL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
 ; AVX512VL-NEXT:    retq
@@ -1551,7 +1551,7 @@ define <4 x i64> @shuffle_v4i64_z0z3(<4
 ; AVX512VL-LABEL: shuffle_v4i64_z0z3:
 ; AVX512VL:       # BB#0:
 ; AVX512VL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX512VL-NEXT:    retq
   %1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 4, i32 0, i32 4, i32 3>
@@ -1578,7 +1578,7 @@ define <4 x i64> @shuffle_v4i64_1z2z(<4
 ;
 ; AVX512VL-LABEL: shuffle_v4i64_1z2z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
 ; AVX512VL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
 ; AVX512VL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll Thu Aug  3 01:50:18 2017
@@ -47,7 +47,7 @@ define <16 x float> @shuffle_v16f32_00_1
 define <16 x float> @shuffle_v16f32_00_zz_01_zz_04_zz_05_zz_08_zz_09_zz_0c_zz_0d_zz(<16 x float> %a, <16 x float> %b) {
 ; ALL-LABEL: shuffle_v16f32_00_zz_01_zz_04_zz_05_zz_08_zz_09_zz_0c_zz_0d_zz:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vxorps %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <16 x float> %a, <16 x float> zeroinitializer, <16 x i32><i32 0, i32 16, i32 1, i32 16, i32 4, i32 16, i32 5, i32 16, i32 8, i32 16, i32 9, i32 16, i32 12, i32 16, i32 13, i32 16>
@@ -75,7 +75,7 @@ define <16 x i32> @shuffle_v16i32_00_10_
 define <16 x i32> @shuffle_v16i32_zz_10_zz_11_zz_14_zz_15_zz_18_zz_19_zz_1c_zz_1d(<16 x i32> %a, <16 x i32> %b) {
 ; ALL-LABEL: shuffle_v16i32_zz_10_zz_11_zz_14_zz_15_zz_18_zz_19_zz_1c_zz_1d:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; ALL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; ALL-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <16 x i32> zeroinitializer, <16 x i32> %b, <16 x i32><i32 15, i32 16, i32 13, i32 17, i32 11, i32 20, i32 9, i32 21, i32 7, i32 24, i32 5, i32 25, i32 3, i32 28, i32 1, i32 29>
@@ -94,7 +94,7 @@ define <16 x float> @shuffle_v16f32_02_1
 define <16 x float> @shuffle_v16f32_zz_12_zz_13_zz_16_zz_17_zz_1a_zz_1b_zz_1e_zz_1f(<16 x float> %a, <16 x float> %b) {
 ; ALL-LABEL: shuffle_v16f32_zz_12_zz_13_zz_16_zz_17_zz_1a_zz_1b_zz_1e_zz_1f:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vxorps %zmm0, %zmm0, %zmm0
+; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; ALL-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <16 x float> zeroinitializer, <16 x float> %b, <16 x i32><i32 0, i32 18, i32 0, i32 19, i32 4, i32 22, i32 4, i32 23, i32 6, i32 26, i32 6, i32 27, i32 8, i32 30, i32 8, i32 31>
@@ -177,7 +177,7 @@ define <16 x i32> @shuffle_v16i32_02_12_
 define <16 x i32> @shuffle_v16i32_02_zz_03_zz_06_zz_07_zz_0a_zz_0b_zz_0e_zz_0f_zz(<16 x i32> %a, <16 x i32> %b) {
 ; ALL-LABEL: shuffle_v16i32_02_zz_03_zz_06_zz_07_zz_0a_zz_0b_zz_0e_zz_0f_zz:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vpunpckhdq {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <16 x i32> %a, <16 x i32> zeroinitializer, <16 x i32><i32 2, i32 30, i32 3, i32 28, i32 6, i32 26, i32 7, i32 24, i32 10, i32 22, i32 11, i32 20, i32 14, i32 18, i32 15, i32 16>

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v64.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v64.ll Thu Aug  3 01:50:18 2017
@@ -491,7 +491,7 @@ define <64 x i8> @shuffle_v64i8_63_zz_61
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz:
 ; AVX512VBMI:       # BB#0:
-; AVX512VBMI-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VBMI-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [63,65,61,67,59,69,57,71,55,73,53,75,51,77,49,79,47,81,45,83,43,85,41,87,39,89,37,91,35,93,33,95,31,97,29,99,27,101,25,103,23,105,21,107,19,109,17,111,15,113,13,115,11,117,9,119,7,121,5,123,3,125,1,127]
 ; AVX512VBMI-NEXT:    vpermt2b %zmm1, %zmm2, %zmm0
 ; AVX512VBMI-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll Thu Aug  3 01:50:18 2017
@@ -979,14 +979,14 @@ define <8 x double> @shuffle_v8f64_f5112
 define <8 x double> @shuffle_v8f64_1z2z5z6z(<8 x double> %a, <8 x double> %b) {
 ; AVX512F-LABEL: shuffle_v8f64_1z2z5z6z:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovapd {{.*#+}} zmm2 = [1,8,2,8,5,8,6,8]
 ; AVX512F-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8f64_1z2z5z6z:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-32-NEXT:    vmovapd {{.*#+}} zmm2 = [1,0,8,0,2,0,8,0,5,0,8,0,6,0,8,0]
 ; AVX512F-32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; AVX512F-32-NEXT:    retl
@@ -1983,13 +1983,13 @@ define <8 x double> @shuffle_v8f64_0z2z4
 ;
 ; AVX512F-LABEL: shuffle_v8f64_0z2z4z6z:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8f64_0z2z4z6z:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-32-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; AVX512F-32-NEXT:    retl
   %shuffle = shufflevector <8 x double> %a, <8 x double> zeroinitializer, <8 x i32><i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6, i32 8>
@@ -2015,13 +2015,13 @@ define <8 x i64> @shuffle_v8i64_z8zazcze
 ;
 ; AVX512F-LABEL: shuffle_v8i64_z8zazcze:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8i64_z8zazcze:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-32-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; AVX512F-32-NEXT:    retl
   %shuffle = shufflevector <8 x i64> zeroinitializer, <8 x i64> %b, <8 x i32><i32 7, i32 8, i32 5, i32 10, i32 3, i32 12, i32 1, i32 14>
@@ -2047,13 +2047,13 @@ define <8 x double> @shuffle_v8f64_z9zbz
 ;
 ; AVX512F-LABEL: shuffle_v8f64_z9zbzdzf:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8f64_z9zbzdzf:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; AVX512F-32-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512F-32-NEXT:    retl
   %shuffle = shufflevector <8 x double> zeroinitializer, <8 x double> %b, <8 x i32><i32 0, i32 9, i32 0, i32 11, i32 0, i32 13, i32 0, i32 15>
@@ -2079,13 +2079,13 @@ define <8 x i64> @shuffle_v8i64_1z3z5z7z
 ;
 ; AVX512F-LABEL: shuffle_v8i64_1z3z5z7z:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8i64_1z3z5z7z:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-32-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512F-32-NEXT:    retl
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> zeroinitializer, <8 x i32><i32 1, i32 8, i32 3, i32 15, i32 5, i32 8, i32 7, i32 15>

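In the v8f64 hunks above the zeroing idiom changes domain as well as width: vpxord on a zmm becomes vxorpd on an xmm when the zero feeds floating-point unpacks and permutes, keeping the constant in the FP domain, while the v8i64 cases stay integer with vpxor. A minimal IR sketch that should reproduce the new vxorpd %xmm1 form (hypothetical function name, mirroring the shuffle_v8f64_0z2z4z6z test above; compile with llc -mtriple=x86_64-- -mattr=+avx512f):

define <8 x double> @interleave_with_zero(<8 x double> %a) {
  ; even lanes of %a interleaved with zero; the zero vector is now built with vxorpd %xmm1, %xmm1, %xmm1
  %s = shufflevector <8 x double> %a, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6, i32 8>
  ret <8 x double> %s
}
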
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll Thu Aug  3 01:50:18 2017
@@ -191,7 +191,7 @@ define <8 x float> @expand5(<4 x float>
 ; SKX64-LABEL: expand5:
 ; SKX64:       # BB#0:
 ; SKX64-NEXT:    vbroadcastss %xmm0, %ymm0
-; SKX64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; SKX64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; SKX64-NEXT:    retq
 ;
@@ -205,7 +205,7 @@ define <8 x float> @expand5(<4 x float>
 ; SKX32-LABEL: expand5:
 ; SKX32:       # BB#0:
 ; SKX32-NEXT:    vbroadcastss %xmm0, %ymm0
-; SKX32-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; SKX32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX32-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; SKX32-NEXT:    retl
 ;
@@ -435,7 +435,7 @@ define <16 x float> @expand12(<8 x float
 ; SKX64:       # BB#0:
 ; SKX64-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; SKX64-NEXT:    vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; SKX64-NEXT:    vxorps %zmm1, %zmm1, %zmm1
+; SKX64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX64-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
 ; SKX64-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX64-NEXT:    retq
@@ -444,7 +444,7 @@ define <16 x float> @expand12(<8 x float
 ; KNL64:       # BB#0:
 ; KNL64-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; KNL64-NEXT:    vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; KNL64-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; KNL64-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
 ; KNL64-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL64-NEXT:    retq
@@ -453,7 +453,7 @@ define <16 x float> @expand12(<8 x float
 ; SKX32:       # BB#0:
 ; SKX32-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; SKX32-NEXT:    vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; SKX32-NEXT:    vxorps %zmm1, %zmm1, %zmm1
+; SKX32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX32-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
 ; SKX32-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX32-NEXT:    retl
@@ -462,7 +462,7 @@ define <16 x float> @expand12(<8 x float
 ; KNL32:       # BB#0:
 ; KNL32-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
 ; KNL32-NEXT:    vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; KNL32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; KNL32-NEXT:    vpermt2ps %zmm0, %zmm2, %zmm1
 ; KNL32-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL32-NEXT:    retl
@@ -473,7 +473,7 @@ define <16 x float> @expand12(<8 x float
 define <16 x float> @expand13(<8 x float> %a ) {
 ; SKX64-LABEL: expand13:
 ; SKX64:       # BB#0:
-; SKX64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; SKX64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX64-NEXT:    vinsertf32x8 $1, %ymm0, %zmm1, %zmm0
 ; SKX64-NEXT:    retq
 ;
@@ -485,7 +485,7 @@ define <16 x float> @expand13(<8 x float
 ;
 ; SKX32-LABEL: expand13:
 ; SKX32:       # BB#0:
-; SKX32-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; SKX32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; SKX32-NEXT:    vinsertf32x8 $1, %ymm0, %zmm1, %zmm0
 ; SKX32-NEXT:    retl
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll Thu Aug  3 01:50:18 2017
@@ -1081,14 +1081,14 @@ define <32 x i16> @combine_vpermt2var_vp
 define <8 x double> @combine_vpermi2var_vpermvar_8f64_as_vperm2_zero(<8 x double> %x0) {
 ; X32-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
 ; X32:       # BB#0:
-; X32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vmovapd {{.*#+}} zmm2 = [8,0,3,0,10,0,11,0,1,0,7,0,14,0,5,0]
 ; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
 ; X64:       # BB#0:
-; X64-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vmovapd {{.*#+}} zmm2 = [8,3,10,11,1,7,14,5]
 ; X64-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; X64-NEXT:    retq
@@ -1100,14 +1100,14 @@ define <8 x double> @combine_vpermi2var_
 define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x float> %x0) {
 ; X32-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
 ; X32:       # BB#0:
-; X32-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
 ; X32-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
 ; X64:       # BB#0:
-; X64-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
 ; X64-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll Thu Aug  3 01:50:18 2017
@@ -187,7 +187,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
 ; AVX512F:       # BB#0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vpsllq $63, %zmm2, %zmm0
@@ -201,7 +201,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
 ; VL_BW_DQ:       # BB#0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
-; VL_BW_DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; VL_BW_DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
 ; VL_BW_DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; VL_BW_DQ-NEXT:    vpmovq2m %zmm2, %k0
@@ -249,7 +249,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
 ; AVX512F:       # BB#0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vpsllq $63, %zmm2, %zmm0
@@ -263,7 +263,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
 ; VL_BW_DQ:       # BB#0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
-; VL_BW_DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; VL_BW_DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
 ; VL_BW_DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; VL_BW_DQ-NEXT:    vpmovq2m %zmm2, %k0
@@ -283,7 +283,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
-; AVX512F-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; AVX512F-NEXT:    vpsllq $63, %zmm2, %zmm0
 ; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -297,7 +297,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
-; VL_BW_DQ-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; VL_BW_DQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; VL_BW_DQ-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; VL_BW_DQ-NEXT:    vpmovq2m %zmm2, %k0
 ; VL_BW_DQ-NEXT:    kmovd %k0, %eax

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Thu Aug  3 01:50:18 2017
@@ -59,7 +59,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ;
 ; AVX512CDVL-LABEL: testv4i64:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
@@ -175,7 +175,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ;
 ; AVX512CDVL-LABEL: testv4i64u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntq %ymm0, %ymm0
@@ -287,7 +287,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ;
 ; AVX512CDVL-LABEL: testv8i32:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
@@ -428,7 +428,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ;
 ; AVX512CDVL-LABEL: testv8i32u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntd %ymm0, %ymm0
@@ -539,7 +539,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ;
 ; AVX512CDVL-LABEL: testv16i16:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -669,7 +669,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ;
 ; AVX512CDVL-LABEL: testv16i16u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -790,7 +790,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ;
 ; AVX512CDVL-LABEL: testv32i8:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -907,7 +907,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ;
 ; AVX512CDVL-LABEL: testv32i8u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1

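The tzcnt tests follow the usual cttz lowering x & (0 - x), so the only change in these hunks is how the all-zeros operand of the subtraction is materialized. A hedged reproducer for the 256-bit case (illustrative, not copied from the test file; run with llc -mtriple=x86_64-- -mattr=+avx512cd,+avx512vl to see the new vpxor %xmm1 form):

define <4 x i64> @cttz_v4i64(<4 x i64> %x) {
  ; lowered as (x & (0 - x)) followed by vplzcntq; the 0 is a single XMM-sized xor
  %r = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %x, i1 false)
  ret <4 x i64> %r
}
declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
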
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll Thu Aug  3 01:50:18 2017
@@ -7,7 +7,7 @@
 define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
 ; AVX512CD-LABEL: testv8i64:
 ; AVX512CD:       # BB#0:
-; AVX512CD-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CD-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -35,7 +35,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ;
 ; AVX512CDBW-LABEL: testv8i64:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512CDBW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -53,7 +53,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ;
 ; AVX512BW-LABEL: testv8i64:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -71,7 +71,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i64:
 ; AVX512VPOPCNTDQ:       # BB#0:
-; AVX512VPOPCNTDQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -85,7 +85,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
 ; AVX512CD-LABEL: testv8i64u:
 ; AVX512CD:       # BB#0:
-; AVX512CD-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CD-NEXT:    vplzcntq %zmm0, %zmm0
@@ -95,7 +95,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ;
 ; AVX512CDBW-LABEL: testv8i64u:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vplzcntq %zmm0, %zmm0
@@ -105,7 +105,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ;
 ; AVX512BW-LABEL: testv8i64u:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -123,7 +123,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i64u:
 ; AVX512VPOPCNTDQ:       # BB#0:
-; AVX512VPOPCNTDQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -137,7 +137,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
 ; AVX512CD-LABEL: testv16i32:
 ; AVX512CD:       # BB#0:
-; AVX512CD-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; AVX512CD-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -173,7 +173,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ;
 ; AVX512CDBW-LABEL: testv16i32:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512CDBW-NEXT:    vpandd %zmm2, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -195,7 +195,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ;
 ; AVX512BW-LABEL: testv16i32:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandd %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -217,7 +217,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i32:
 ; AVX512VPOPCNTDQ:       # BB#0:
-; AVX512VPOPCNTDQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -231,7 +231,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
 ; AVX512CD-LABEL: testv16i32u:
 ; AVX512CD:       # BB#0:
-; AVX512CD-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; AVX512CD-NEXT:    vplzcntd %zmm0, %zmm0
@@ -241,7 +241,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ;
 ; AVX512CDBW-LABEL: testv16i32u:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vplzcntd %zmm0, %zmm0
@@ -251,7 +251,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ;
 ; AVX512BW-LABEL: testv16i32u:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandd %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
@@ -273,7 +273,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i32u:
 ; AVX512VPOPCNTDQ:       # BB#0:
-; AVX512VPOPCNTDQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -319,7 +319,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ;
 ; AVX512CDBW-LABEL: testv32i16:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -339,7 +339,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ;
 ; AVX512BW-LABEL: testv32i16:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -413,7 +413,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ;
 ; AVX512CDBW-LABEL: testv32i16u:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -433,7 +433,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ;
 ; AVX512BW-LABEL: testv32i16u:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -501,7 +501,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ;
 ; AVX512CDBW-LABEL: testv64i8:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -518,7 +518,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ;
 ; AVX512BW-LABEL: testv64i8:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -591,7 +591,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ;
 ; AVX512CDBW-LABEL: testv64i8u:
 ; AVX512CDBW:       # BB#0:
-; AVX512CDBW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
@@ -608,7 +608,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ;
 ; AVX512BW-LABEL: testv64i8u:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1

Modified: llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll?rev=309926&r1=309925&r2=309926&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll Thu Aug  3 01:50:18 2017
@@ -130,19 +130,12 @@ define <32 x i8> @signbit_sel_v32i8(<32
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: signbit_sel_v32i8:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: signbit_sel_v32i8:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT:    retq
+; AVX512-LABEL: signbit_sel_v32i8:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
   %tr = icmp slt <32 x i8> %mask, zeroinitializer
   %z = select <32 x i1> %tr, <32 x i8> %x, <32 x i8> %y
   ret <32 x i8> %z
@@ -170,19 +163,12 @@ define <16 x i16> @signbit_sel_v16i16(<1
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: signbit_sel_v16i16:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: signbit_sel_v16i16:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT:    retq
+; AVX512-LABEL: signbit_sel_v16i16:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
   %tr = icmp slt <16 x i16> %mask, zeroinitializer
   %z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y
   ret <16 x i16> %z
@@ -207,7 +193,7 @@ define <8 x i32> @signbit_sel_v8i32(<8 x
 ;
 ; AVX512VL-LABEL: signbit_sel_v8i32:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -224,7 +210,7 @@ define <4 x i64> @signbit_sel_v4i64(<4 x
 ;
 ; AVX512VL-LABEL: signbit_sel_v4i64:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -241,7 +227,7 @@ define <4 x double> @signbit_sel_v4f64(<
 ;
 ; AVX512VL-LABEL: signbit_sel_v4f64:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -296,7 +282,7 @@ define <8 x double> @signbit_sel_v8f64(<
 ;
 ; AVX512-LABEL: signbit_sel_v8f64:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
 ; AVX512-NEXT:    vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
 ; AVX512-NEXT:    retq

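Taken together, every hunk above rewrites a 256-bit or 512-bit register-zeroing idiom (vpxor %ymm, vpxord %zmm, vxorps %zmm, vxorps %ymm) as its 128-bit form. This is safe because VEX- and EVEX-encoded instructions that write an xmm register zero the upper bits of the containing ymm/zmm register, so vpxor %xmm1, %xmm1, %xmm1 still leaves all 512 bits of zmm1 zero while using a shorter, EVEX-free encoding; it applies only when the allocated register is xmm0-xmm15, since xmm16-xmm31 have no VEX encoding. A side effect is that FileCheck prefixes collapse where subtargets now agree, as in the merged AVX512 blocks in vselect-pcmp.ll. The simplest reproducer is returning an all-zero vector (hypothetical function name; the expected instruction in the comment is an assumption, not a committed CHECK line):

define <16 x i32> @zero_zmm() {
  ; llc -mtriple=x86_64-- -mattr=+avx512f should now emit vpxor %xmm0, %xmm0, %xmm0
  ; instead of vpxord %zmm0, %zmm0, %zmm0
  ret <16 x i32> zeroinitializer
}
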
More information about the llvm-commits mailing list