[llvm-branch-commits] [llvm] c5a7e75 - Update *-inseltpoison.ll tests at Transforms/InstCombine/X86 by replacing undef with poison (NFC)

Juneyoung Lee via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Sun Jan 3 05:45:07 PST 2021


Author: Juneyoung Lee
Date: 2021-01-03T22:40:05+09:00
New Revision: c5a7e75eb6b5129967f9b936425e5fd9633a42f5

URL: https://github.com/llvm/llvm-project/commit/c5a7e75eb6b5129967f9b936425e5fd9633a42f5
DIFF: https://github.com/llvm/llvm-project/commit/c5a7e75eb6b5129967f9b936425e5fd9633a42f5.diff

LOG: Update *-inseltpoison.ll tests at Transforms/InstCombine/X86 by replacing undef with poison (NFC)
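
For context: an undef operand may resolve to a different arbitrary value at each use, while poison propagates through almost all operations, so poison operands give InstCombine at least as much folding freedom as undef ones; the tests keep exercising the same folds, hence the (NFC) tag. A minimal sketch of the pattern the updated tests exercise (the function name below is illustrative, not taken from the commit):

    define <8 x i16> @sketch_poison_packssdw() {
      ; With fully-poison inputs, InstCombine can discard the intrinsic call
      ; entirely, as the updated tests in this diff check for.
      %r = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> poison, <4 x i32> poison)
      ret <8 x i16> %r
    }
    declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)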

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
    llvm/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
    llvm/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
    llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
    llvm/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
index ff8842cd15ea..4cc89fc80f29 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
@@ -5,99 +5,99 @@
 ; UNDEF Elts
 ;
 
-define <8 x i16> @undef_packssdw_128() {
-; CHECK-LABEL: @undef_packssdw_128(
+define <8 x i16> @poison_packssdw_128() {
+; CHECK-LABEL: @poison_packssdw_128(
 ; CHECK-NEXT:    ret <8 x i16> undef
 ;
-  %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> undef, <4 x i32> undef)
+  %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> poison, <4 x i32> poison)
   ret <8 x i16> %1
 }
 
-define <8 x i16> @undef_packusdw_128() {
-; CHECK-LABEL: @undef_packusdw_128(
+define <8 x i16> @poison_packusdw_128() {
+; CHECK-LABEL: @poison_packusdw_128(
 ; CHECK-NEXT:    ret <8 x i16> undef
 ;
-  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> undef)
+  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> poison, <4 x i32> poison)
   ret <8 x i16> %1
 }
 
-define <16 x i8> @undef_packsswb_128() {
-; CHECK-LABEL: @undef_packsswb_128(
+define <16 x i8> @poison_packsswb_128() {
+; CHECK-LABEL: @poison_packsswb_128(
 ; CHECK-NEXT:    ret <16 x i8> undef
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> undef, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> poison, <8 x i16> poison)
   ret <16 x i8> %1
 }
 
-define <16 x i8> @undef_packuswb_128() {
-; CHECK-LABEL: @undef_packuswb_128(
+define <16 x i8> @poison_packuswb_128() {
+; CHECK-LABEL: @poison_packuswb_128(
 ; CHECK-NEXT:    ret <16 x i8> undef
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> undef, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> poison, <8 x i16> poison)
   ret <16 x i8> %1
 }
 
-define <16 x i16> @undef_packssdw_256() {
-; CHECK-LABEL: @undef_packssdw_256(
+define <16 x i16> @poison_packssdw_256() {
+; CHECK-LABEL: @poison_packssdw_256(
 ; CHECK-NEXT:    ret <16 x i16> undef
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> undef, <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> poison, <8 x i32> poison)
   ret <16 x i16> %1
 }
 
-define <16 x i16> @undef_packusdw_256() {
-; CHECK-LABEL: @undef_packusdw_256(
+define <16 x i16> @poison_packusdw_256() {
+; CHECK-LABEL: @poison_packusdw_256(
 ; CHECK-NEXT:    ret <16 x i16> undef
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> undef, <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> poison, <8 x i32> poison)
   ret <16 x i16> %1
 }
 
-define <32 x i8> @undef_packsswb_256() {
-; CHECK-LABEL: @undef_packsswb_256(
+define <32 x i8> @poison_packsswb_256() {
+; CHECK-LABEL: @poison_packsswb_256(
 ; CHECK-NEXT:    ret <32 x i8> undef
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> undef)
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> poison, <16 x i16> poison)
   ret <32 x i8> %1
 }
 
-define <32 x i8> @undef_packuswb_256() {
-; CHECK-LABEL: @undef_packuswb_256(
+define <32 x i8> @poison_packuswb_256() {
+; CHECK-LABEL: @poison_packuswb_256(
 ; CHECK-NEXT:    ret <32 x i8> undef
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> undef, <16 x i16> undef)
+  %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> poison, <16 x i16> poison)
   ret <32 x i8> %1
 }
 
-define <32 x i16> @undef_packssdw_512() {
-; CHECK-LABEL: @undef_packssdw_512(
+define <32 x i16> @poison_packssdw_512() {
+; CHECK-LABEL: @poison_packssdw_512(
 ; CHECK-NEXT:    ret <32 x i16> undef
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> undef, <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> poison, <16 x i32> poison)
   ret <32 x i16> %1
 }
 
-define <32 x i16> @undef_packusdw_512() {
-; CHECK-LABEL: @undef_packusdw_512(
+define <32 x i16> @poison_packusdw_512() {
+; CHECK-LABEL: @poison_packusdw_512(
 ; CHECK-NEXT:    ret <32 x i16> undef
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> undef, <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> poison, <16 x i32> poison)
   ret <32 x i16> %1
 }
 
-define <64 x i8> @undef_packsswb_512() {
-; CHECK-LABEL: @undef_packsswb_512(
+define <64 x i8> @poison_packsswb_512() {
+; CHECK-LABEL: @poison_packsswb_512(
 ; CHECK-NEXT:    ret <64 x i8> undef
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> undef)
+  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> poison, <32 x i16> poison)
   ret <64 x i8> %1
 }
 
-define <64 x i8> @undef_packuswb_512() {
-; CHECK-LABEL: @undef_packuswb_512(
+define <64 x i8> @poison_packuswb_512() {
+; CHECK-LABEL: @poison_packuswb_512(
 ; CHECK-NEXT:    ret <64 x i8> undef
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> undef, <32 x i16> undef)
+  %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> poison, <32 x i16> poison)
   ret <64 x i8> %1
 }
 
@@ -115,17 +115,17 @@ define <8 x i16> @fold_packssdw_128() {
 
 define <8 x i16> @fold_packusdw_128() {
 ; CHECK-LABEL: @fold_packusdw_128(
-; CHECK-NEXT:    ret <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 0, i16 -32768, i16 -1>
+; CHECK-NEXT:    ret <8 x i16> <i16 poison, i16 poison, i16 poison, i16 poison, i16 0, i16 0, i16 -32768, i16 -1>
 ;
-  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> <i32 0, i32 -1, i32 32768, i32 65537>)
+  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> poison, <4 x i32> <i32 0, i32 -1, i32 32768, i32 65537>)
   ret <8 x i16> %1
 }
 
 define <16 x i8> @fold_packsswb_128() {
 ; CHECK-LABEL: @fold_packsswb_128(
-; CHECK-NEXT:    ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>
+; CHECK-NEXT:    ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> poison)
   ret <16 x i8> %1
 }
 
@@ -139,9 +139,9 @@ define <16 x i8> @fold_packuswb_128() {
 
 define <16 x i16> @fold_packssdw_256() {
 ; CHECK-LABEL: @fold_packssdw_256(
-; CHECK-NEXT:    ret <16 x i16> <i16 0, i16 256, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef>
+; CHECK-NEXT:    ret <16 x i16> <i16 0, i16 256, i16 32767, i16 -32768, i16 poison, i16 poison, i16 poison, i16 poison, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 poison, i16 poison, i16 poison, i16 poison>
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> <i32 0, i32 256, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> <i32 0, i32 256, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <8 x i32> poison)
   ret <16 x i16> %1
 }
 
@@ -155,9 +155,9 @@ define <16 x i16> @fold_packusdw_256() {
 
 define <32 x i8> @fold_packsswb_256() {
 ; CHECK-LABEL: @fold_packsswb_256(
-; CHECK-NEXT:    ret <32 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+; CHECK-NEXT:    ret <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> zeroinitializer)
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> poison, <16 x i16> zeroinitializer)
   ret <32 x i8> %1
 }
 
@@ -171,9 +171,9 @@ define <32 x i8> @fold_packuswb_256() {
 
 define <32 x i16> @fold_packssdw_512() {
 ; CHECK-LABEL: @fold_packssdw_512(
-; CHECK-NEXT:    ret <32 x i16> <i16 0, i16 512, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 512, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef>
+; CHECK-NEXT:    ret <32 x i16> <i16 0, i16 512, i16 32767, i16 -32768, i16 poison, i16 poison, i16 poison, i16 poison, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 poison, i16 poison, i16 poison, i16 poison, i16 0, i16 512, i16 32767, i16 -32768, i16 poison, i16 poison, i16 poison, i16 poison, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 poison, i16 poison, i16 poison, i16 poison>
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> <i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767, i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> <i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767, i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <16 x i32> poison)
   ret <32 x i16> %1
 }
 
@@ -187,9 +187,9 @@ define <32 x i16> @fold_packusdw_512() {
 
 define <64 x i8> @fold_packsswb_512() {
 ; CHECK-LABEL: @fold_packsswb_512(
-; CHECK-NEXT:    ret <64 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+; CHECK-NEXT:    ret <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> zeroinitializer)
+  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> poison, <32 x i16> zeroinitializer)
   ret <64 x i8> %1
 }
 
@@ -211,8 +211,8 @@ define <8 x i16> @elts_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 ;
-  %1 = shufflevector <4 x i32> %a0, <4 x i32> poison, <4 x i32> <i32 3, i32 1, i32 undef, i32 undef>
-  %2 = shufflevector <4 x i32> %a1, <4 x i32> poison, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> poison, <4 x i32> <i32 3, i32 1, i32 poison, i32 poison>
+  %2 = shufflevector <4 x i32> %a1, <4 x i32> poison, <4 x i32> <i32 poison, i32 2, i32 1, i32 poison>
   %3 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %2)
   %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 7, i32 7, i32 7, i32 7>
   ret <8 x i16> %4
@@ -227,7 +227,7 @@ define <8 x i16> @elts_packusdw_128(<4 x i32> %a0, <4 x i32> %a1) {
   %1 = insertelement <4 x i32> %a0, i32 0, i32 0
   %2 = insertelement <4 x i32> %a1, i32 0, i32 3
   %3 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %1, <4 x i32> %2)
-  %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 undef>
+  %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> <i32 poison, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 poison>
   ret <8 x i16> %4
 }
 
@@ -260,9 +260,9 @@ define <16 x i16> @elts_packssdw_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-NEXT:    ret <16 x i16> [[TMP2]]
 ;
   %1 = shufflevector <8 x i32> %a0, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 undef, i32 6, i32 5, i32 undef>
+  %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32> <i32 poison, i32 2, i32 1, i32 poison, i32 poison, i32 6, i32 5, i32 poison>
   %3 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %1, <8 x i32> %2)
-  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> <i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 11, i32 12, i32 undef, i32 undef, i32 15>
+  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> <i32 poison, i32 poison, i32 2, i32 3, i32 4, i32 poison, i32 poison, i32 7, i32 8, i32 poison, i32 poison, i32 11, i32 12, i32 poison, i32 poison, i32 15>
   ret <16 x i16> %4
 }
 
@@ -276,7 +276,7 @@ define <16 x i16> @elts_packusdw_256(<8 x i32> %a0, <8 x i32> %a1) {
   %1 = shufflevector <8 x i32> %a0, <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   %3 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %1, <8 x i32> %2)
-  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
+  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 12, i32 13, i32 14, i32 15, i32 poison, i32 poison, i32 poison, i32 poison>
   ret <16 x i16> %4
 }
 
@@ -309,9 +309,9 @@ define <32 x i16> @elts_packssdw_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-NEXT:    ret <32 x i16> [[TMP2]]
 ;
   %1 = shufflevector <16 x i32> %a0, <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 9, i32 8, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 undef, i32 6, i32 5, i32 undef, i32 undef, i32 10, i32 9, i32 undef, i32 undef, i32 14, i32 13, i32 undef>
+  %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32> <i32 poison, i32 2, i32 1, i32 poison, i32 poison, i32 6, i32 5, i32 poison, i32 poison, i32 10, i32 9, i32 poison, i32 poison, i32 14, i32 13, i32 poison>
   %3 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %1, <16 x i32> %2)
-  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> <i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 11, i32 12, i32 undef, i32 undef, i32 15, i32 undef, i32 undef, i32 18, i32 19, i32 20, i32 undef, i32 undef, i32 23, i32 24, i32 undef, i32 undef, i32 27, i32 28, i32 undef, i32 undef, i32 31>
+  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> <i32 poison, i32 poison, i32 2, i32 3, i32 4, i32 poison, i32 poison, i32 7, i32 8, i32 poison, i32 poison, i32 11, i32 12, i32 poison, i32 poison, i32 15, i32 poison, i32 poison, i32 18, i32 19, i32 20, i32 poison, i32 poison, i32 23, i32 24, i32 poison, i32 poison, i32 27, i32 28, i32 poison, i32 poison, i32 31>
   ret <32 x i16> %4
 }
 
@@ -325,7 +325,7 @@ define <32 x i16> @elts_packusdw_512(<16 x i32> %a0, <16 x i32> %a1) {
   %1 = shufflevector <16 x i32> %a0, <16 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
   %3 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %1, <16 x i32> %2)
-  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef>
+  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 12, i32 13, i32 14, i32 15, i32 poison, i32 poison, i32 poison, i32 poison, i32 20, i32 21, i32 22, i32 23, i32 poison, i32 poison, i32 poison, i32 poison, i32 28, i32 29, i32 30, i32 31, i32 poison, i32 poison, i32 poison, i32 poison>
   ret <32 x i16> %4
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
index f5094f85a8a4..35781159e1b9 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
@@ -415,56 +415,56 @@ define <64 x i8> @permute3_avx512(<64 x i8> %InVec) {
   ret <64 x i8> %1
 }
 
-; FIXME: Verify that instcombine is able to fold constant byte shuffles with undef mask elements.
+; FIXME: Verify that instcombine is able to fold constant byte shuffles with poison mask elements.
 
-define <16 x i8> @fold_with_undef_elts(<16 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts(
+define <16 x i8> @fold_with_poison_elts(<16 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <16 x i8> [[INVEC:%.*]], <16 x i8> <i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <16 x i32> <i32 0, i32 16, i32 undef, i32 16, i32 1, i32 16, i32 undef, i32 16, i32 2, i32 16, i32 undef, i32 16, i32 3, i32 16, i32 undef, i32 16>
 ; CHECK-NEXT:    ret <16 x i8> [[TMP1]]
 ;
-  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128>)
+  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128>)
   ret <16 x i8> %1
 }
 
-define <32 x i8> @fold_with_undef_elts_avx2(<32 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts_avx2(
+define <32 x i8> @fold_with_poison_elts_avx2(<32 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts_avx2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <32 x i8> [[INVEC:%.*]], <32 x i8> <i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <32 x i32> <i32 0, i32 32, i32 undef, i32 32, i32 1, i32 32, i32 undef, i32 32, i32 2, i32 32, i32 undef, i32 32, i32 3, i32 32, i32 undef, i32 32, i32 16, i32 48, i32 undef, i32 48, i32 17, i32 48, i32 undef, i32 48, i32 18, i32 48, i32 undef, i32 48, i32 19, i32 48, i32 undef, i32 48>
 ; CHECK-NEXT:    ret <32 x i8> [[TMP1]]
 ;
-  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128, i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128>)
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128, i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128>)
   ret <32 x i8> %1
 }
 
-define <64 x i8> @fold_with_undef_elts_avx512(<64 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts_avx512(
+define <64 x i8> @fold_with_poison_elts_avx512(<64 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts_avx512(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <64 x i8> [[INVEC:%.*]], <64 x i8> <i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <64 x i32> <i32 0, i32 64, i32 undef, i32 64, i32 1, i32 64, i32 undef, i32 64, i32 2, i32 64, i32 undef, i32 64, i32 3, i32 64, i32 undef, i32 64, i32 16, i32 80, i32 undef, i32 80, i32 17, i32 80, i32 undef, i32 80, i32 18, i32 80, i32 undef, i32 80, i32 19, i32 80, i32 undef, i32 80, i32 32, i32 96, i32 undef, i32 96, i32 33, i32 96, i32 undef, i32 96, i32 34, i32 96, i32 undef, i32 96, i32 35, i32 96, i32 undef, i32 96, i32 48, i32 112, i32 undef, i32 112, i32 49, i32 112, i32 undef, i32 112, i32 50, i32 112, i32 undef, i32 112, i32 51, i32 112, i32 undef, i32 112>
 ; CHECK-NEXT:    ret <64 x i8> [[TMP1]]
 ;
-  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> <i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128, i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128, i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128, i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8 -128, i8 undef, i8 -128, i8 2, i8 -128, i8 undef, i8 -128, i8 3, i8 -128, i8 undef, i8 -128>)
+  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> <i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128, i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128, i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128, i8 0, i8 -128, i8 poison, i8 -128, i8 1, i8 -128, i8 poison, i8 -128, i8 2, i8 -128, i8 poison, i8 -128, i8 3, i8 -128, i8 poison, i8 -128>)
   ret <64 x i8> %1
 }
 
-define <16 x i8> @fold_with_allundef_elts(<16 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts(
+define <16 x i8> @fold_with_allpoison_elts(<16 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts(
 ; CHECK-NEXT:    ret <16 x i8> undef
 ;
-  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> undef)
+  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> poison)
   ret <16 x i8> %1
 }
 
-define <32 x i8> @fold_with_allundef_elts_avx2(<32 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts_avx2(
+define <32 x i8> @fold_with_allpoison_elts_avx2(<32 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts_avx2(
 ; CHECK-NEXT:    ret <32 x i8> undef
 ;
-  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> undef)
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> poison)
   ret <32 x i8> %1
 }
 
-define <64 x i8> @fold_with_allundef_elts_avx512(<64 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts_avx512(
+define <64 x i8> @fold_with_allpoison_elts_avx512(<64 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts_avx512(
 ; CHECK-NEXT:    ret <64 x i8> undef
 ;
-  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> undef)
+  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> poison)
   ret <64 x i8> %1
 }
 
@@ -479,7 +479,7 @@ define <16 x i8> @demanded_elts_insertion(<16 x i8> %InVec, <16 x i8> %BaseMask,
   %1 = insertelement <16 x i8> %BaseMask, i8 %M0, i32 0
   %2 = insertelement <16 x i8> %1, i8 %M15, i32 15
   %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> %2)
-  %4 = shufflevector <16 x i8> %3, <16 x i8> poison, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>
+  %4 = shufflevector <16 x i8> %3, <16 x i8> poison, <16 x i32> <i32 poison, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 poison>
   ret <16 x i8> %4
 }
 
@@ -492,7 +492,7 @@ define <32 x i8> @demanded_elts_insertion_avx2(<32 x i8> %InVec, <32 x i8> %Base
   %1 = insertelement <32 x i8> %BaseMask, i8 %M0, i32 0
   %2 = insertelement <32 x i8> %1, i8 %M22, i32 22
   %3 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> %2)
-  %4 = shufflevector <32 x i8> %3, <32 x i8> poison, <32 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 undef, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %4 = shufflevector <32 x i8> %3, <32 x i8> poison, <32 x i32> <i32 poison, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 poison, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <32 x i8> %4
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
index 3270ca134db8..1376266308ab 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
@@ -50,11 +50,11 @@ define <2 x i64> @test_extrq_constant(<2 x i64> %x, <16 x i8> %y) {
   ret <2 x i64> %1
 }
 
-define <2 x i64> @test_extrq_constant_undef(<2 x i64> %x, <16 x i8> %y) {
-; CHECK-LABEL: @test_extrq_constant_undef(
+define <2 x i64> @test_extrq_constant_poison(<2 x i64> %x, <16 x i8> %y) {
+; CHECK-LABEL: @test_extrq_constant_poison(
 ; CHECK-NEXT:    ret <2 x i64> <i64 65535, i64 undef>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> <i64 -1, i64 undef>, <16 x i8> <i8 16, i8 15, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>) nounwind
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> <i64 -1, i64 poison>, <16 x i8> <i8 16, i8 15, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>) nounwind
   ret <2 x i64> %1
 }
 
@@ -65,7 +65,7 @@ define <2 x i64> @test_extrq_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
 ;
-  %1 = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> bitcast (<2 x i64> <i64 0, i64 undef> to <16 x i8>))
+  %1 = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> bitcast (<2 x i64> <i64 0, i64 poison> to <16 x i8>))
   ret <2 x i64> %1
 }
 
@@ -104,8 +104,8 @@ define <2 x i64> @test_extrqi_shuffle_2zzzzzzzuuuuuuuu(<2 x i64> %x) {
   ret <2 x i64> %1
 }
 
-define <2 x i64> @test_extrqi_undef(<2 x i64> %x) {
-; CHECK-LABEL: @test_extrqi_undef(
+define <2 x i64> @test_extrqi_poison(<2 x i64> %x) {
+; CHECK-LABEL: @test_extrqi_poison(
 ; CHECK-NEXT:    ret <2 x i64> undef
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> zeroinitializer, i8 32, i8 33)
@@ -128,11 +128,11 @@ define <2 x i64> @test_extrqi_constant(<2 x i64> %x) {
   ret <2 x i64> %1
 }
 
-define <2 x i64> @test_extrqi_constant_undef(<2 x i64> %x) {
-; CHECK-LABEL: @test_extrqi_constant_undef(
+define <2 x i64> @test_extrqi_constant_poison(<2 x i64> %x) {
+; CHECK-LABEL: @test_extrqi_constant_poison(
 ; CHECK-NEXT:    ret <2 x i64> <i64 15, i64 undef>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> <i64 -1, i64 undef>, i8 4, i8 18)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> <i64 -1, i64 poison>, i8 4, i8 18)
   ret <2 x i64> %1
 }
 
@@ -140,7 +140,7 @@ define <2 x i64> @test_extrqi_call_constexpr() {
 ; CHECK-LABEL: @test_extrqi_call_constexpr(
 ; CHECK-NEXT:    ret <2 x i64> zeroinitializer
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 undef, i64 2, i64 undef> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 8, i8 16)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 poison, i64 2, i64 poison> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 8, i8 16)
   ret <2 x i64> %1
 }
 
@@ -174,11 +174,11 @@ define <2 x i64> @test_insertq_constant(<2 x i64> %x, <2 x i64> %y) {
   ret <2 x i64> %1
 }
 
-define <2 x i64> @test_insertq_constant_undef(<2 x i64> %x, <2 x i64> %y) {
-; CHECK-LABEL: @test_insertq_constant_undef(
+define <2 x i64> @test_insertq_constant_poison(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: @test_insertq_constant_poison(
 ; CHECK-NEXT:    ret <2 x i64> <i64 33, i64 undef>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> <i64 1, i64 undef>, <2 x i64> <i64 8, i64 658>) nounwind
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> <i64 1, i64 poison>, <2 x i64> <i64 8, i64 658>) nounwind
   ret <2 x i64> %1
 }
 
@@ -187,7 +187,7 @@ define <2 x i64> @test_insertq_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> <i64 0, i64 poison>, i8 2, i8 0)
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 undef, i64 2, i64 undef> to <16 x i16>) to <16 x i8>) to <2 x i64>))
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 poison, i64 2, i64 poison> to <16 x i16>) to <16 x i8>) to <2 x i64>))
   ret <2 x i64> %1
 }
 
@@ -232,12 +232,12 @@ define <2 x i64> @test_insertqi_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> <i64 0, i64 poison>, i8 48, i8 3)
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 undef, i64 2, i64 undef> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 48, i8 3)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> <i64 0, i64 poison, i64 2, i64 poison> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 48, i8 3)
   ret <2 x i64> %1
 }
 
 ; The result of this insert is the second arg, since the top 64 bits of
-; the result are undefined, and we copy the bottom 64 bits from the
+; the result are poison, and we copy the bottom 64 bits from the
 ; second arg
 define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {
 ; CHECK-LABEL: @testInsert64Bits(

diff  --git a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
index fdfe6acbdb49..352c305e10c4 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
@@ -1604,7 +1604,7 @@ define <4 x i32> @avx2_psrav_d_128_allbig(<4 x i32> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> [[V:%.*]], <i32 31, i32 31, i32 31, i32 undef>
 ; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 undef>)
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 poison>)
   ret <4 x i32> %1
 }
 
@@ -1613,7 +1613,7 @@ define <8 x i32> @avx2_psrav_d_256_allbig(<8 x i32> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i32> [[V:%.*]], <i32 undef, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
 ; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <8 x i32> %1
 }
 
@@ -1622,36 +1622,36 @@ define <16 x i32> @avx512_psrav_d_512_allbig(<16 x i32> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <16 x i32> [[V:%.*]], <i32 undef, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 undef, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
 ; CHECK-NEXT:    ret <16 x i32> [[TMP1]]
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <16 x i32> %1
 }
 
-define <4 x i32> @avx2_psrav_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psrav_d_128_undef(
+define <4 x i32> @avx2_psrav_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psrav_d_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> [[V:%.*]], <i32 undef, i32 8, i32 16, i32 31>
 ; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 64>, i32 undef, i32 0
+  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 64>, i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }
 
-define <8 x i32> @avx2_psrav_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psrav_d_256_undef(
+define <8 x i32> @avx2_psrav_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psrav_d_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 24, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }
 
-define <16 x i32> @avx512_psrav_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psrav_d_512_undef(
+define <16 x i32> @avx512_psrav_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psrav_d_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <16 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 24, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 24, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 24, i32 32, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -1695,7 +1695,7 @@ define <2 x i64> @avx512_psrav_q_128_allbig(<2 x i64> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <2 x i64> [[V:%.*]], <i64 63, i64 undef>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> <i64 64, i64 undef>)
+  %1 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> <i64 64, i64 poison>)
   ret <2 x i64> %1
 }
 
@@ -1704,26 +1704,26 @@ define <4 x i64> @avx512_psrav_q_256_allbig(<4 x i64> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i64> [[V:%.*]], <i64 63, i64 undef, i64 63, i64 63>
 ; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 poison, i64 -128, i64 -60>)
   ret <4 x i64> %1
 }
 
-define <2 x i64> @avx512_psrav_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_128_undef(
+define <2 x i64> @avx512_psrav_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <2 x i64> [[V:%.*]], <i64 undef, i64 8>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 undef, i64 0
+  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 poison, i64 0
   %2 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }
 
-define <4 x i64> @avx512_psrav_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_256_undef(
+define <4 x i64> @avx512_psrav_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -1750,16 +1750,16 @@ define <8 x i64> @avx512_psrav_q_512_allbig(<8 x i64> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i64> [[V:%.*]], <i64 63, i64 undef, i64 63, i64 63, i64 63, i64 undef, i64 63, i64 63>
 ; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 undef, i64 -128, i64 -60, i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 poison, i64 -128, i64 -60, i64 64, i64 poison, i64 -128, i64 -60>)
   ret <8 x i64> %1
 }
 
-define <8 x i64> @avx512_psrav_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_512_undef(
+define <8 x i64> @avx512_psrav_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -1786,16 +1786,16 @@ define <8 x i16> @avx512_psrav_w_128_allbig(<8 x i16> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i16> [[V:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 undef>
 ; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 undef>)
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 poison>)
   ret <8 x i16> %1
 }
 
-define <8 x i16> @avx512_psrav_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_128_undef(
+define <8 x i16> @avx512_psrav_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <8 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
 ; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 undef, i64 0
+  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -1822,16 +1822,16 @@ define <16 x i16> @avx512_psrav_w_256_allbig(<16 x i16> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <16 x i16> [[V:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 undef, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
   ret <16 x i16> %1
 }
 
-define <16 x i16> @avx512_psrav_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_256_undef(
+define <16 x i16> @avx512_psrav_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <16 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
 ; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 undef, i64 0
+  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -1858,16 +1858,16 @@ define <32 x i16> @avx512_psrav_w_512_allbig(<32 x i16> %v) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <32 x i16> [[V:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 undef, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 undef, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 undef, i16 15, i16 15, i16 undef, i16 15, i16 15>
 ; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 undef, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 undef, i16 345, i16 123, i16 undef, i16 1024, i16 54321>)
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 poison, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 poison, i16 345, i16 123, i16 poison, i16 1024, i16 54321>)
   ret <32 x i16> %1
 }
 
-define <32 x i16> @avx512_psrav_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_512_undef(
+define <32 x i16> @avx512_psrav_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <32 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>
 ; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 undef, i64 0
+  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -1932,7 +1932,7 @@ define <4 x i32> @avx2_psrlv_d_128_allbig(<4 x i32> %v) {
 ; CHECK-LABEL: @avx2_psrlv_d_128_allbig(
 ; CHECK-NEXT:    ret <4 x i32> <i32 0, i32 0, i32 0, i32 undef>
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 undef>)
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 poison>)
   ret <4 x i32> %1
 }
 
@@ -1940,26 +1940,26 @@ define <8 x i32> @avx2_psrlv_d_256_allbig(<8 x i32> %v) {
 ; CHECK-LABEL: @avx2_psrlv_d_256_allbig(
 ; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <8 x i32> %1
 }
 
-define <4 x i32> @avx2_psrlv_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psrlv_d_128_undef(
+define <4 x i32> @avx2_psrlv_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psrlv_d_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> [[V:%.*]], <i32 undef, i32 8, i32 16, i32 31>
 ; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 31>, i32 undef, i32 0
+  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 31>, i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }
 
-define <8 x i32> @avx2_psrlv_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psrlv_d_256_undef(
+define <8 x i32> @avx2_psrlv_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psrlv_d_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <8 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2028,27 +2028,27 @@ define <4 x i64> @avx2_psrlv_q_256_allbig(<4 x i64> %v) {
 ; CHECK-LABEL: @avx2_psrlv_q_256_allbig(
 ; CHECK-NEXT:    ret <4 x i64> <i64 0, i64 undef, i64 0, i64 0>
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 poison, i64 -128, i64 -60>)
   ret <4 x i64> %1
 }
 
-; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input.
+; The shift amount is 0 (the poison lane could be 0), so we return the unshifted input.
 
-define <2 x i64> @avx2_psrlv_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx2_psrlv_q_128_undef(
+define <2 x i64> @avx2_psrlv_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx2_psrlv_q_128_poison(
 ; CHECK-NEXT:    ret <2 x i64> [[V:%.*]]
 ;
-  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 undef, i64 1
+  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 poison, i64 1
   %2 = tail call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }
 
-define <4 x i64> @avx2_psrlv_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx2_psrlv_q_256_undef(
+define <4 x i64> @avx2_psrlv_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx2_psrlv_q_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2083,16 +2083,16 @@ define <16 x i32> @avx512_psrlv_d_512_allbig(<16 x i32> %v) {
 ; CHECK-LABEL: @avx512_psrlv_d_512_allbig(
 ; CHECK-NEXT:    ret <16 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <16 x i32> %1
 }
 
-define <16 x i32> @avx512_psrlv_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psrlv_d_512_undef(
+define <16 x i32> @avx512_psrlv_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psrlv_d_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <16 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2127,16 +2127,16 @@ define <8 x i64> @avx512_psrlv_q_512_allbig(<8 x i64> %v) {
 ; CHECK-LABEL: @avx512_psrlv_q_512_allbig(
 ; CHECK-NEXT:    ret <8 x i64> <i64 0, i64 undef, i64 0, i64 0, i64 0, i64 undef, i64 0, i64 0>
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 undef, i64 -128, i64 -60, i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 poison, i64 -128, i64 -60, i64 64, i64 poison, i64 -128, i64 -60>)
   ret <8 x i64> %1
 }
 
-define <8 x i64> @avx512_psrlv_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psrlv_q_512_undef(
+define <8 x i64> @avx512_psrlv_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psrlv_q_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <8 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2171,16 +2171,16 @@ define <8 x i16> @avx512_psrlv_w_128_allbig(<8 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_128_allbig(
 ; CHECK-NEXT:    ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef>
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 undef>)
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 poison>)
   ret <8 x i16> %1
 }
 
-define <8 x i16> @avx512_psrlv_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_128_undef(
+define <8 x i16> @avx512_psrlv_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <8 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
 ; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 undef, i64 0
+  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2215,16 +2215,16 @@ define <16 x i16> @avx512_psrlv_w_256_allbig(<16 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_256_allbig(
 ; CHECK-NEXT:    ret <16 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
   ret <16 x i16> %1
 }
 
-define <16 x i16> @avx512_psrlv_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_256_undef(
+define <16 x i16> @avx512_psrlv_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <16 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
 ; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 undef, i64 0
+  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2259,16 +2259,16 @@ define <32 x i16> @avx512_psrlv_w_512_allbig(<32 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_512_allbig(
 ; CHECK-NEXT:    ret <32 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 undef, i16 0, i16 0>
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 undef, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 undef, i16 345, i16 123, i16 undef, i16 1024, i16 54321>)
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 poison, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 poison, i16 345, i16 123, i16 poison, i16 1024, i16 54321>)
   ret <32 x i16> %1
 }
 
-define <32 x i16> @avx512_psrlv_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_512_undef(
+define <32 x i16> @avx512_psrlv_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <32 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>
 ; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 undef, i64 0
+  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2333,7 +2333,7 @@ define <4 x i32> @avx2_psllv_d_128_allbig(<4 x i32> %v) {
 ; CHECK-LABEL: @avx2_psllv_d_128_allbig(
 ; CHECK-NEXT:    ret <4 x i32> <i32 0, i32 0, i32 0, i32 undef>
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 undef>)
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> <i32 32, i32 100, i32 -255, i32 poison>)
   ret <4 x i32> %1
 }
 
@@ -2341,26 +2341,26 @@ define <8 x i32> @avx2_psllv_d_256_allbig(<8 x i32> %v) {
 ; CHECK-LABEL: @avx2_psllv_d_256_allbig(
 ; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <8 x i32> %1
 }
 
-define <4 x i32> @avx2_psllv_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psllv_d_128_undef(
+define <4 x i32> @avx2_psllv_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psllv_d_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[V:%.*]], <i32 undef, i32 8, i32 16, i32 31>
 ; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 31>, i32 undef, i32 0
+  %1 = insertelement <4 x i32> <i32 0, i32 8, i32 16, i32 31>, i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }
 
-define <8 x i32> @avx2_psllv_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psllv_d_256_undef(
+define <8 x i32> @avx2_psllv_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psllv_d_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <8 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2429,27 +2429,27 @@ define <4 x i64> @avx2_psllv_q_256_allbig(<4 x i64> %v) {
 ; CHECK-LABEL: @avx2_psllv_q_256_allbig(
 ; CHECK-NEXT:    ret <4 x i64> <i64 0, i64 undef, i64 0, i64 0>
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> <i64 64, i64 poison, i64 -128, i64 -60>)
   ret <4 x i64> %1
 }
 
-; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input.
+; The shift amount is 0 (the poison lane could be 0), so we return the unshifted input.
 
-define <2 x i64> @avx2_psllv_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx2_psllv_q_128_undef(
+define <2 x i64> @avx2_psllv_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx2_psllv_q_128_poison(
 ; CHECK-NEXT:    ret <2 x i64> [[V:%.*]]
 ;
-  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 undef, i64 1
+  %1 = insertelement <2 x i64> <i64 0, i64 8>, i64 poison, i64 1
   %2 = tail call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }
 
-define <4 x i64> @avx2_psllv_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx2_psllv_q_256_undef(
+define <4 x i64> @avx2_psllv_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx2_psllv_q_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <4 x i64> <i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2484,16 +2484,16 @@ define <16 x i32> @avx512_psllv_d_512_allbig(<16 x i32> %v) {
 ; CHECK-LABEL: @avx512_psllv_d_512_allbig(
 ; CHECK-NEXT:    ret <16 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> <i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 undef, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> <i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555, i32 poison, i32 100, i32 255, i32 55555, i32 -32, i32 -100, i32 -255, i32 -55555>)
   ret <16 x i32> %1
 }
 
-define <16 x i32> @avx512_psllv_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psllv_d_512_undef(
+define <16 x i32> @avx512_psllv_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psllv_d_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <16 x i32> [[V:%.*]], <i32 0, i32 undef, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>
 ; CHECK-NEXT:    ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 undef, i32 1
+  %1 = insertelement <16 x i32> <i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0, i32 0, i32 8, i32 16, i32 31, i32 31, i32 24, i32 8, i32 0>, i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2528,16 +2528,16 @@ define <8 x i64> @avx512_psllv_q_512_allbig(<8 x i64> %v) {
 ; CHECK-LABEL: @avx512_psllv_q_512_allbig(
 ; CHECK-NEXT:    ret <8 x i64> <i64 0, i64 undef, i64 0, i64 0, i64 0, i64 undef, i64 0, i64 0>
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 undef, i64 -128, i64 -60, i64 64, i64 undef, i64 -128, i64 -60>)
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> <i64 64, i64 poison, i64 -128, i64 -60, i64 64, i64 poison, i64 -128, i64 -60>)
   ret <8 x i64> %1
 }
 
-define <8 x i64> @avx512_psllv_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psllv_q_512_undef(
+define <8 x i64> @avx512_psllv_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psllv_q_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i64> [[V:%.*]], <i64 undef, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>
 ; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 undef, i64 0
+  %1 = insertelement <8 x i64> <i64 0, i64 8, i64 16, i64 31, i64 0, i64 8, i64 16, i64 31>, i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2572,16 +2572,16 @@ define <8 x i16> @avx512_psllv_w_128_allbig(<8 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_128_allbig(
 ; CHECK-NEXT:    ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef>
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 undef>)
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 poison>)
   ret <8 x i16> %1
 }
 
-define <8 x i16> @avx512_psllv_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_128_undef(
+define <8 x i16> @avx512_psllv_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_128_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
 ; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 undef, i64 0
+  %1 = insertelement <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2616,16 +2616,16 @@ define <16 x i16> @avx512_psllv_w_256_allbig(<16 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_256_allbig(
 ; CHECK-NEXT:    ret <16 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 256, i16 16, i16 28, i16 65535, i16 32767>)
   ret <16 x i16> %1
 }
 
-define <16 x i16> @avx512_psllv_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_256_undef(
+define <16 x i16> @avx512_psllv_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_256_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <16 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
 ; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 undef, i64 0
+  %1 = insertelement <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2660,16 +2660,16 @@ define <32 x i16> @avx512_psllv_w_512_allbig(<32 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_512_allbig(
 ; CHECK-NEXT:    ret <32 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 undef, i16 0, i16 0>
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 undef, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 undef, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 undef, i16 345, i16 123, i16 undef, i16 1024, i16 54321>)
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> <i16 20, i16 -1, i16 -2, i16 33, i16 44, i16 55, i16 66, i16 -7, i16 poison, i16 64, i16 -10, i16 128, i16 16, i16 28, i16 65535, i16 32767, i16 56, i16 -14, i16 poison, i16 16, i16 67, i16 567, i16 -32768, i16 4096, i16 8192, i16 -12345, i16 poison, i16 345, i16 123, i16 poison, i16 1024, i16 54321>)
   ret <32 x i16> %1
 }
 
-define <32 x i16> @avx512_psllv_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_512_undef(
+define <32 x i16> @avx512_psllv_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_512_poison(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <32 x i16> [[V:%.*]], <i16 undef, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>
 ; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 undef, i64 0
+  %1 = insertelement <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2685,7 +2685,7 @@ define <8 x i16> @sse2_psra_w_128_masked(<8 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <8 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <8 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef>
+  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 poison, i16 poison, i16 poison, i16 poison>
   %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2697,7 +2697,7 @@ define <8 x i32> @avx2_psra_d_256_masked(<8 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <8 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <8 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 undef, i32 undef>
+  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 poison, i32 poison>
   %2 = tail call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %v, <4 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2709,7 +2709,7 @@ define <8 x i64> @avx512_psra_q_512_masked(<8 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <8 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <8 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a, <i64 63, i64 undef>
+  %1 = and <2 x i64> %a, <i64 63, i64 poison>
   %2 = tail call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %v, <2 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2721,7 +2721,7 @@ define <4 x i32> @sse2_psrl_d_128_masked(<4 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 undef, i32 undef>
+  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 poison, i32 poison>
   %2 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }
@@ -2733,7 +2733,7 @@ define <4 x i64> @avx2_psrl_q_256_masked(<4 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <4 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a, <i64 63, i64 undef>
+  %1 = and <2 x i64> %a, <i64 63, i64 poison>
   %2 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %v, <2 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2745,7 +2745,7 @@ define <32 x i16> @avx512_psrl_w_512_masked(<32 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <32 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <32 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef>
+  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 poison, i16 poison, i16 poison, i16 poison>
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %v, <8 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2757,7 +2757,7 @@ define <2 x i64> @sse2_psll_q_128_masked(<2 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <2 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a, <i64 63, i64 undef>
+  %1 = and <2 x i64> %a, <i64 63, i64 poison>
   %2 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }
@@ -2769,7 +2769,7 @@ define <16 x i16> @avx2_psll_w_256_masked(<16 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <16 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <16 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef>
+  %1 = and <8 x i16> %a, <i16 15, i16 0, i16 0, i16 0, i16 poison, i16 poison, i16 poison, i16 poison>
   %2 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %v, <8 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2781,7 +2781,7 @@ define <16 x i32> @avx512_psll_d_512_masked(<16 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <16 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <16 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 undef, i32 undef>
+  %1 = and <4 x i32> %a, <i32 31, i32 0, i32 poison, i32 poison>
   %2 = tail call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %v, <4 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2927,7 +2927,7 @@ define <4 x i32> @avx2_psrav_d_128_masked_shuffle(<4 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <4 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a, <i32 undef, i32 undef, i32 15, i32 31>
+  %1 = and <4 x i32> %a, <i32 poison, i32 poison, i32 15, i32 31>
   %2 = shufflevector <4 x i32> %1, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
   %3 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> %2)
   ret <4 x i32> %3

diff  --git a/llvm/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
index f633a3d43569..b992f834c3e9 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
@@ -165,59 +165,59 @@ define <8 x double> @test_vpermilvar_pd_512(<8 x double> %v) {
   ret <8 x double> %a
 }
 
-; Verify that instcombine is able to fold constant shuffles with undef mask elements.
+; Verify that instcombine is able to fold constant shuffles with poison mask elements.
 
-define <4 x float> @undef_test_vpermilvar_ps(<4 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps(
+define <4 x float> @poison_test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> [[V:%.*]], <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>
 ; CHECK-NEXT:    ret <4 x float> [[TMP1]]
 ;
-  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>)
+  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 poison, i32 2, i32 1, i32 poison>)
   ret <4 x float> %a
 }
 
-define <8 x float> @undef_test_vpermilvar_ps_256(<8 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps_256(
+define <8 x float> @poison_test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps_256(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x float> [[V:%.*]], <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 7, i32 6, i32 5, i32 4>
 ; CHECK-NEXT:    ret <8 x float> [[TMP1]]
 ;
-  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 undef, i32 6, i32 5, i32 undef, i32 3, i32 2, i32 1, i32 0>)
+  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 poison, i32 6, i32 5, i32 poison, i32 3, i32 2, i32 1, i32 0>)
   ret <8 x float> %a
 }
 
-define <16 x float> @undef_test_vpermilvar_ps_512(<16 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps_512(
+define <16 x float> @poison_test_vpermilvar_ps_512(<16 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps_512(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <16 x float> [[V:%.*]], <16 x float> undef, <16 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 7, i32 6, i32 5, i32 4, i32 undef, i32 10, i32 9, i32 undef, i32 15, i32 14, i32 13, i32 12>
 ; CHECK-NEXT:    ret <16 x float> [[TMP1]]
 ;
-  %a = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %v, <16 x i32> <i32 undef, i32 6, i32 5, i32 undef, i32 3, i32 2, i32 1, i32 0, i32 undef, i32 6, i32 5, i32 undef, i32 3, i32 2, i32 1, i32 0>)
+  %a = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %v, <16 x i32> <i32 poison, i32 6, i32 5, i32 poison, i32 3, i32 2, i32 1, i32 0, i32 poison, i32 6, i32 5, i32 poison, i32 3, i32 2, i32 1, i32 0>)
   ret <16 x float> %a
 }
 
-define <2 x double> @undef_test_vpermilvar_pd(<2 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd(
+define <2 x double> @poison_test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[V:%.*]], <2 x double> undef, <2 x i32> <i32 undef, i32 0>
 ; CHECK-NEXT:    ret <2 x double> [[TMP1]]
 ;
-  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 undef, i64 0>)
+  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 poison, i64 0>)
   ret <2 x double> %a
 }
 
-define <4 x double> @undef_test_vpermilvar_pd_256(<4 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd_256(
+define <4 x double> @poison_test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd_256(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x double> [[V:%.*]], <4 x double> undef, <4 x i32> <i32 undef, i32 0, i32 3, i32 undef>
 ; CHECK-NEXT:    ret <4 x double> [[TMP1]]
 ;
-  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 undef, i64 1, i64 2, i64 undef>)
+  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 poison, i64 1, i64 2, i64 poison>)
   ret <4 x double> %a
 }
 
-define <8 x double> @undef_test_vpermilvar_pd_512(<8 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd_512(
+define <8 x double> @poison_test_vpermilvar_pd_512(<8 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd_512(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x double> [[V:%.*]], <8 x double> undef, <8 x i32> <i32 undef, i32 0, i32 3, i32 undef, i32 undef, i32 4, i32 7, i32 undef>
 ; CHECK-NEXT:    ret <8 x double> [[TMP1]]
 ;
-  %a = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %v, <8 x i64> <i64 undef, i64 1, i64 2, i64 undef, i64 undef, i64 1, i64 2, i64 undef>)
+  %a = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %v, <8 x i64> <i64 poison, i64 1, i64 2, i64 poison, i64 poison, i64 1, i64 2, i64 poison>)
   ret <8 x double> %a
 }
 
@@ -230,7 +230,7 @@ define <4 x float> @elts_test_vpermilvar_ps(<4 x float> %a0, i32 %a1) {
 ;
   %1 = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %a1, i32 3
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %1)
-  %3 = shufflevector <4 x float> %2, <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
+  %3 = shufflevector <4 x float> %2, <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
   ret <4 x float> %3
 }
 
@@ -241,7 +241,7 @@ define <8 x float> @elts_test_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1)
 ;
   %1 = shufflevector <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 2, i32 1, i32 0>, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
-  %3 = shufflevector <8 x float> %2, <8 x float> poison, <8 x i32> <i32 undef, i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7>
+  %3 = shufflevector <8 x float> %2, <8 x float> poison, <8 x i32> <i32 poison, i32 1, i32 poison, i32 3, i32 poison, i32 5, i32 poison, i32 7>
   ret <8 x float> %3
 }
 
@@ -253,7 +253,7 @@ define <16 x float> @elts_test_vpermilvar_ps_512(<16 x float> %a0, <16 x i32> %a
 ;
   %1 = insertelement <16 x i32> %a1, i32 %a2, i32 0
   %2 = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %1)
-  %3 = shufflevector <16 x float> %2, <16 x float> poison, <16 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %3 = shufflevector <16 x float> %2, <16 x float> poison, <16 x i32> <i32 poison, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x float> %3
 }
 
@@ -264,7 +264,7 @@ define <2 x double> @elts_test_vpermilvar_pd(<2 x double> %a0, i64 %a1) {
 ;
   %1 = insertelement <2 x i64> <i64 0, i64 2>, i64 %a1, i32 1
   %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %1)
-  %3 = shufflevector <2 x double> %2, <2 x double> poison, <2 x i32> <i32 0, i32 undef>
+  %3 = shufflevector <2 x double> %2, <2 x double> poison, <2 x i32> <i32 0, i32 poison>
   ret <2 x double> %3
 }
 
@@ -275,7 +275,7 @@ define <4 x double> @elts_test_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1
 ;
   %1 = shufflevector <4 x i64> <i64 0, i64 2, i64 0, i64 2>, <4 x i64> %a1, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %1)
-  %3 = shufflevector <4 x double> %2, <4 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
+  %3 = shufflevector <4 x double> %2, <4 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
   ret <4 x double> %3
 }
 

More information about the llvm-branch-commits mailing list