[llvm] 8d1169c - [RISCV][2/3] Switch undef -> poison in fixed-vector RVV tests

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 1 03:19:48 PST 2022


Author: Fraser Cormack
Date: 2022-02-01T11:06:56Z
New Revision: 8d1169cf74ecd362cb7992fafe7eeca9771a625f

URL: https://github.com/llvm/llvm-project/commit/8d1169cf74ecd362cb7992fafe7eeca9771a625f
DIFF: https://github.com/llvm/llvm-project/commit/8d1169cf74ecd362cb7992fafe7eeca9771a625f.diff

LOG: [RISCV][2/3] Switch undef -> poison in fixed-vector RVV tests
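
(For reference, an illustrative splat sequence in the updated form; the vector type and value names below are made up and not taken from any one test:)

  %head = insertelement <4 x i32> poison, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer

Both the insertelement base vector and the second shufflevector operand previously used undef.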

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index 90900c60ee44..5aa39eb50224 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -234,8 +234,8 @@ define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %
 ; LMULMAX4-NEXT:    ret
   %r = add <32 x i32> %x, %y
   %s = add <32 x i32> %r, %z
-  %head = insertelement <32 x i32> undef, i32 %w, i32 0
-  %splat = shufflevector <32 x i32> %head, <32 x i32> undef, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i32> poison, i32 %w, i32 0
+  %splat = shufflevector <32 x i32> %head, <32 x i32> poison, <32 x i32> zeroinitializer
   %t = add <32 x i32> %s, %splat
   ret <32 x i32> %t
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index bc6599de8e67..6417975fc394 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -673,8 +673,8 @@ define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32
 ; LMULMAX1-NEXT:    ret
   %r = add <32 x i32> %x, %y
   %s = add <32 x i32> %r, %z
-  %head = insertelement <32 x i32> undef, i32 %w, i32 0
-  %splat = shufflevector <32 x i32> %head, <32 x i32> undef, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i32> poison, i32 %w, i32 0
+  %splat = shufflevector <32 x i32> %head, <32 x i32> poison, <32 x i32> zeroinitializer
   %t = add <32 x i32> %s, %splat
   ret <32 x i32> %t
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 3d1953930833..9db7b3c39e8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -138,7 +138,7 @@ define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    ret
-  %v0 = insertelement <4 x float> undef, float %f, i32 0
+  %v0 = insertelement <4 x float> poison, float %f, i32 0
   %v1 = insertelement <4 x float> %v0, float 0.0, i32 1
   %v2 = insertelement <4 x float> %v1, float %f, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3
@@ -159,7 +159,7 @@ define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    ret
-  %v0 = insertelement <4 x float> undef, float %f, i32 0
+  %v0 = insertelement <4 x float> poison, float %f, i32 0
   %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
   %v2 = insertelement <4 x float> %v1, float %f, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3
@@ -193,7 +193,7 @@ define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
 ; RV64-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
-  %v0 = insertelement <4 x float> undef, float %f, i32 0
+  %v0 = insertelement <4 x float> poison, float %f, i32 0
   %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
   %v2 = insertelement <4 x float> %v1, float 2.0, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index 32d272512c0b..305fbcea3557 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -517,8 +517,8 @@ define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fcmp oeq <8 x half> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -533,8 +533,8 @@ define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan oeq <8 x half> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -558,8 +558,8 @@ define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fcmp une <4 x float> %a, %c
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -583,8 +583,8 @@ define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fcmp nnan une <4 x float> %a, %c
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -608,8 +608,8 @@ define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fcmp ogt <2 x double> %a, %c
   store <2 x i1> %d, <2 x i1>* %z
   ret void
@@ -633,8 +633,8 @@ define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fcmp nnan ogt <2 x double> %a, %c
   store <2 x i1> %d, <2 x i1>* %z
   ret void
@@ -649,8 +649,8 @@ define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
-  %b = insertelement <16 x half> undef, half %y, i32 0
-  %c = shufflevector <16 x half> %b, <16 x half> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x half> poison, half %y, i32 0
+  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
   %d = fcmp olt <16 x half> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -665,8 +665,8 @@ define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
-  %b = insertelement <16 x half> undef, half %y, i32 0
-  %c = shufflevector <16 x half> %b, <16 x half> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x half> poison, half %y, i32 0
+  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan olt <16 x half> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -681,8 +681,8 @@ define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
-  %b = insertelement <8 x float> undef, float %y, i32 0
-  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x float> poison, float %y, i32 0
+  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
   %d = fcmp oge <8 x float> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -697,8 +697,8 @@ define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
-  %b = insertelement <8 x float> undef, float %y, i32 0
-  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x float> poison, float %y, i32 0
+  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan oge <8 x float> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -722,8 +722,8 @@ define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
-  %b = insertelement <4 x double> undef, double %y, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x double> poison, double %y, i32 0
+  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
   %d = fcmp ole <4 x double> %a, %c
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -747,8 +747,8 @@ define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
-  %b = insertelement <4 x double> undef, double %y, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x double> poison, double %y, i32 0
+  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
   %d = fcmp nnan ole <4 x double> %a, %c
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -765,8 +765,8 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
-  %b = insertelement <32 x half> undef, half %y, i32 0
-  %c = shufflevector <32 x half> %b, <32 x half> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x half> poison, half %y, i32 0
+  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
   %d = fcmp ule <32 x half> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -782,8 +782,8 @@ define void @fcmp_ule_vf_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
-  %b = insertelement <32 x half> undef, half %y, i32 0
-  %c = shufflevector <32 x half> %b, <32 x half> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x half> poison, half %y, i32 0
+  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
   %d = fcmp nnan ule <32 x half> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -799,8 +799,8 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
-  %b = insertelement <16 x float> undef, float %y, i32 0
-  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x float> poison, float %y, i32 0
+  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
   %d = fcmp uge <16 x float> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -815,8 +815,8 @@ define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
-  %b = insertelement <16 x float> undef, float %y, i32 0
-  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x float> poison, float %y, i32 0
+  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan uge <16 x float> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -832,8 +832,8 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
-  %b = insertelement <8 x double> undef, double %y, i32 0
-  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x double> poison, double %y, i32 0
+  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
   %d = fcmp ult <8 x double> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -848,8 +848,8 @@ define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
-  %b = insertelement <8 x double> undef, double %y, i32 0
-  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x double> poison, double %y, i32 0
+  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan ult <8 x double> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -866,8 +866,8 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
-  %b = insertelement <64 x half> undef, half %y, i32 0
-  %c = shufflevector <64 x half> %b, <64 x half> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x half> poison, half %y, i32 0
+  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
   %d = fcmp ugt <64 x half> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -883,8 +883,8 @@ define void @fcmp_ugt_vf_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
-  %b = insertelement <64 x half> undef, half %y, i32 0
-  %c = shufflevector <64 x half> %b, <64 x half> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x half> poison, half %y, i32 0
+  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
   %d = fcmp nnan ugt <64 x half> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -902,8 +902,8 @@ define void @fcmp_ueq_vf_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
-  %b = insertelement <32 x float> undef, float %y, i32 0
-  %c = shufflevector <32 x float> %b, <32 x float> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x float> poison, float %y, i32 0
+  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
   %d = fcmp ueq <32 x float> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -919,8 +919,8 @@ define void @fcmp_ueq_vf_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
-  %b = insertelement <32 x float> undef, float %y, i32 0
-  %c = shufflevector <32 x float> %b, <32 x float> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x float> poison, float %y, i32 0
+  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
   %d = fcmp nnan ueq <32 x float> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -937,8 +937,8 @@ define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
-  %b = insertelement <16 x double> undef, double %y, i32 0
-  %c = shufflevector <16 x double> %b, <16 x double> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x double> poison, double %y, i32 0
+  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
   %d = fcmp one <16 x double> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -953,8 +953,8 @@ define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
-  %b = insertelement <16 x double> undef, double %y, i32 0
-  %c = shufflevector <16 x double> %b, <16 x double> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x double> poison, double %y, i32 0
+  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan one <16 x double> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -981,8 +981,8 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, <4 x half>* %x
-  %b = insertelement <4 x half> undef, half %y, i32 0
-  %c = shufflevector <4 x half> %b, <4 x half> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x half> poison, half %y, i32 0
+  %c = shufflevector <4 x half> %b, <4 x half> poison, <4 x i32> zeroinitializer
   %d = fcmp ord <4 x half> %a, %c
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1009,8 +1009,8 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %x
-  %b = insertelement <2 x half> undef, half %y, i32 0
-  %c = shufflevector <2 x half> %b, <2 x half> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x half> poison, half %y, i32 0
+  %c = shufflevector <2 x half> %b, <2 x half> poison, <2 x i32> zeroinitializer
   %d = fcmp uno <2 x half> %a, %c
   store <2 x i1> %d, <2 x i1>* %z
   ret void
@@ -1025,8 +1025,8 @@ define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fcmp oeq <8 x half> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1041,8 +1041,8 @@ define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan oeq <8 x half> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1066,8 +1066,8 @@ define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fcmp une <4 x float> %c, %a
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1091,8 +1091,8 @@ define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fcmp nnan une <4 x float> %c, %a
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1116,8 +1116,8 @@ define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fcmp ogt <2 x double> %c, %a
   store <2 x i1> %d, <2 x i1>* %z
   ret void
@@ -1141,8 +1141,8 @@ define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fcmp nnan ogt <2 x double> %c, %a
   store <2 x i1> %d, <2 x i1>* %z
   ret void
@@ -1157,8 +1157,8 @@ define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
-  %b = insertelement <16 x half> undef, half %y, i32 0
-  %c = shufflevector <16 x half> %b, <16 x half> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x half> poison, half %y, i32 0
+  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
   %d = fcmp olt <16 x half> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1173,8 +1173,8 @@ define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
-  %b = insertelement <16 x half> undef, half %y, i32 0
-  %c = shufflevector <16 x half> %b, <16 x half> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x half> poison, half %y, i32 0
+  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan olt <16 x half> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1189,8 +1189,8 @@ define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
-  %b = insertelement <8 x float> undef, float %y, i32 0
-  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x float> poison, float %y, i32 0
+  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
   %d = fcmp oge <8 x float> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1205,8 +1205,8 @@ define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
-  %b = insertelement <8 x float> undef, float %y, i32 0
-  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x float> poison, float %y, i32 0
+  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan oge <8 x float> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1230,8 +1230,8 @@ define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
-  %b = insertelement <4 x double> undef, double %y, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x double> poison, double %y, i32 0
+  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
   %d = fcmp ole <4 x double> %c, %a
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1255,8 +1255,8 @@ define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
-  %b = insertelement <4 x double> undef, double %y, i32 0
-  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x double> poison, double %y, i32 0
+  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
   %d = fcmp nnan ole <4 x double> %c, %a
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1273,8 +1273,8 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
-  %b = insertelement <32 x half> undef, half %y, i32 0
-  %c = shufflevector <32 x half> %b, <32 x half> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x half> poison, half %y, i32 0
+  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
   %d = fcmp ule <32 x half> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -1290,8 +1290,8 @@ define void @fcmp_ule_fv_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
-  %b = insertelement <32 x half> undef, half %y, i32 0
-  %c = shufflevector <32 x half> %b, <32 x half> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x half> poison, half %y, i32 0
+  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
   %d = fcmp nnan ule <32 x half> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -1307,8 +1307,8 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
-  %b = insertelement <16 x float> undef, float %y, i32 0
-  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x float> poison, float %y, i32 0
+  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
   %d = fcmp uge <16 x float> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1323,8 +1323,8 @@ define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
-  %b = insertelement <16 x float> undef, float %y, i32 0
-  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x float> poison, float %y, i32 0
+  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan uge <16 x float> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1340,8 +1340,8 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
-  %b = insertelement <8 x double> undef, double %y, i32 0
-  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x double> poison, double %y, i32 0
+  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
   %d = fcmp ult <8 x double> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1356,8 +1356,8 @@ define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
-  %b = insertelement <8 x double> undef, double %y, i32 0
-  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x double> poison, double %y, i32 0
+  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
   %d = fcmp nnan ult <8 x double> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -1374,8 +1374,8 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
-  %b = insertelement <64 x half> undef, half %y, i32 0
-  %c = shufflevector <64 x half> %b, <64 x half> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x half> poison, half %y, i32 0
+  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
   %d = fcmp ugt <64 x half> %c, %a
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -1391,8 +1391,8 @@ define void @fcmp_ugt_fv_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
-  %b = insertelement <64 x half> undef, half %y, i32 0
-  %c = shufflevector <64 x half> %b, <64 x half> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x half> poison, half %y, i32 0
+  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
   %d = fcmp nnan ugt <64 x half> %c, %a
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -1410,8 +1410,8 @@ define void @fcmp_ueq_fv_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
-  %b = insertelement <32 x float> undef, float %y, i32 0
-  %c = shufflevector <32 x float> %b, <32 x float> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x float> poison, float %y, i32 0
+  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
   %d = fcmp ueq <32 x float> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -1427,8 +1427,8 @@ define void @fcmp_ueq_fv_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
-  %b = insertelement <32 x float> undef, float %y, i32 0
-  %c = shufflevector <32 x float> %b, <32 x float> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x float> poison, float %y, i32 0
+  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
   %d = fcmp nnan ueq <32 x float> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -1445,8 +1445,8 @@ define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
-  %b = insertelement <16 x double> undef, double %y, i32 0
-  %c = shufflevector <16 x double> %b, <16 x double> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x double> poison, double %y, i32 0
+  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
   %d = fcmp one <16 x double> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1461,8 +1461,8 @@ define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
-  %b = insertelement <16 x double> undef, double %y, i32 0
-  %c = shufflevector <16 x double> %b, <16 x double> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x double> poison, double %y, i32 0
+  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
   %d = fcmp nnan one <16 x double> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -1489,8 +1489,8 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, <4 x half>* %x
-  %b = insertelement <4 x half> undef, half %y, i32 0
-  %c = shufflevector <4 x half> %b, <4 x half> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x half> poison, half %y, i32 0
+  %c = shufflevector <4 x half> %b, <4 x half> poison, <4 x i32> zeroinitializer
   %d = fcmp ord <4 x half> %c, %a
   store <4 x i1> %d, <4 x i1>* %z
   ret void
@@ -1517,8 +1517,8 @@ define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %x
-  %b = insertelement <2 x half> undef, half %y, i32 0
-  %c = shufflevector <2 x half> %b, <2 x half> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x half> poison, half %y, i32 0
+  %c = shufflevector <2 x half> %b, <2 x half> poison, <2 x i32> zeroinitializer
   %d = fcmp uno <2 x half> %c, %a
   store <2 x i1> %d, <2 x i1>* %z
   ret void

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index ecd4c85b4f56..48cbc82faa35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -101,7 +101,7 @@ define <4 x double> @vrgather_permute_shuffle_vu_v4f64(<4 x double> %x) {
 ; RV64-NEXT:    vrgather.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %s = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
+  %s = shufflevector <4 x double> %x, <4 x double> poison, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
   ret <4 x double> %s
 }
 
@@ -126,7 +126,7 @@ define <4 x double> @vrgather_permute_shuffle_uv_v4f64(<4 x double> %x) {
 ; RV64-NEXT:    vrgather.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %s = shufflevector <4 x double> undef, <4 x double> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
+  %s = shufflevector <4 x double> poison, <4 x double> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
   ret <4 x double> %s
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index 9fb7b4927f71..3b45e149a1e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -11,8 +11,8 @@ define void @splat_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x half> undef, half %y, i32 0
-  %b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x half> poison, half %y, i32 0
+  %b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
   store <8 x half> %b, <8 x half>* %x
   ret void
 }
@@ -24,8 +24,8 @@ define void @splat_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x float> undef, float %y, i32 0
-  %b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x float> poison, float %y, i32 0
+  %b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
   store <4 x float> %b, <4 x float>* %x
   ret void
 }
@@ -37,8 +37,8 @@ define void @splat_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x double> undef, double %y, i32 0
-  %b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
+  %a = insertelement <2 x double> poison, double %y, i32 0
+  %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
   store <2 x double> %b, <2 x double>* %x
   ret void
 }
@@ -59,8 +59,8 @@ define void @splat_16f16(<16 x half>* %x, half %y) {
 ; LMULMAX1-NEXT:    vse16.v v8, (a1)
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <16 x half> undef, half %y, i32 0
-  %b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x half> poison, half %y, i32 0
+  %b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
   store <16 x half> %b, <16 x half>* %x
   ret void
 }
@@ -81,8 +81,8 @@ define void @splat_v8f32(<8 x float>* %x, float %y) {
 ; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <8 x float> undef, float %y, i32 0
-  %b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x float> poison, float %y, i32 0
+  %b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
   store <8 x float> %b, <8 x float>* %x
   ret void
 }
@@ -103,8 +103,8 @@ define void @splat_v4f64(<4 x double>* %x, double %y) {
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <4 x double> undef, double %y, i32 0
-  %b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x double> poison, double %y, i32 0
+  %b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
   store <4 x double> %b, <4 x double>* %x
   ret void
 }
@@ -116,8 +116,8 @@ define void @splat_zero_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x half> undef, half 0.0, i32 0
-  %b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x half> poison, half 0.0, i32 0
+  %b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
   store <8 x half> %b, <8 x half>* %x
   ret void
 }
@@ -129,8 +129,8 @@ define void @splat_zero_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x float> undef, float 0.0, i32 0
-  %b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x float> poison, float 0.0, i32 0
+  %b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
   store <4 x float> %b, <4 x float>* %x
   ret void
 }
@@ -142,8 +142,8 @@ define void @splat_zero_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x double> undef, double 0.0, i32 0
-  %b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
+  %a = insertelement <2 x double> poison, double 0.0, i32 0
+  %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
   store <2 x double> %b, <2 x double>* %x
   ret void
 }
@@ -164,8 +164,8 @@ define void @splat_zero_16f16(<16 x half>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <16 x half> undef, half 0.0, i32 0
-  %b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x half> poison, half 0.0, i32 0
+  %b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
   store <16 x half> %b, <16 x half>* %x
   ret void
 }
@@ -186,8 +186,8 @@ define void @splat_zero_v8f32(<8 x float>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <8 x float> undef, float 0.0, i32 0
-  %b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x float> poison, float 0.0, i32 0
+  %b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
   store <8 x float> %b, <8 x float>* %x
   ret void
 }
@@ -208,8 +208,8 @@ define void @splat_zero_v4f64(<4 x double>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <4 x double> undef, double 0.0, i32 0
-  %b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x double> poison, double 0.0, i32 0
+  %b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
   store <4 x double> %b, <4 x double>* %x
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index 1478249d2ec9..f9d2e7340794 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -14,8 +14,8 @@ define void @gather_const_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = extractelement <8 x half> %a, i32 5
-  %c = insertelement <8 x half> undef, half %b, i32 0
-  %d = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x half> poison, half %b, i32 0
+  %d = shufflevector <8 x half> %c, <8 x half> poison, <8 x i32> zeroinitializer
   store <8 x half> %d, <8 x half>* %x
   ret void
 }
@@ -30,8 +30,8 @@ define void @gather_const_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = extractelement <4 x float> %a, i32 2
-  %c = insertelement <4 x float> undef, float %b, i32 0
-  %d = shufflevector <4 x float> %c, <4 x float> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x float> poison, float %b, i32 0
+  %d = shufflevector <4 x float> %c, <4 x float> poison, <4 x i32> zeroinitializer
   store <4 x float> %d, <4 x float>* %x
   ret void
 }
@@ -45,8 +45,8 @@ define void @gather_const_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = extractelement <2 x double> %a, i32 0
-  %c = insertelement <2 x double> undef, double %b, i32 0
-  %d = shufflevector <2 x double> %c, <2 x double> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x double> poison, double %b, i32 0
+  %d = shufflevector <2 x double> %c, <2 x double> poison, <2 x i32> zeroinitializer
   store <2 x double> %d, <2 x double>* %x
   ret void
 }
@@ -84,8 +84,8 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
   %b = extractelement <64 x half> %a, i32 47
-  %c = insertelement <64 x half> undef, half %b, i32 0
-  %d = shufflevector <64 x half> %c, <64 x half> undef, <64 x i32> zeroinitializer
+  %c = insertelement <64 x half> poison, half %b, i32 0
+  %d = shufflevector <64 x half> %c, <64 x half> poison, <64 x i32> zeroinitializer
   store <64 x half> %d, <64 x half>* %x
   ret void
 }
@@ -123,8 +123,8 @@ define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
   %b = extractelement <32 x float> %a, i32 17
-  %c = insertelement <32 x float> undef, float %b, i32 0
-  %d = shufflevector <32 x float> %c, <32 x float> undef, <32 x i32> zeroinitializer
+  %c = insertelement <32 x float> poison, float %b, i32 0
+  %d = shufflevector <32 x float> %c, <32 x float> poison, <32 x i32> zeroinitializer
   store <32 x float> %d, <32 x float>* %x
   ret void
 }
@@ -160,8 +160,8 @@ define void @gather_const_v16f64(<16 x double>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
   %b = extractelement <16 x double> %a, i32 10
-  %c = insertelement <16 x double> undef, double %b, i32 0
-  %d = shufflevector <16 x double> %c, <16 x double> undef, <16 x i32> zeroinitializer
+  %c = insertelement <16 x double> poison, double %b, i32 0
+  %d = shufflevector <16 x double> %c, <16 x double> poison, <16 x i32> zeroinitializer
   store <16 x double> %d, <16 x double>* %x
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index cb316141e5b7..ba63fdc61da9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -343,8 +343,8 @@ define void @copysign_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %c)
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -359,8 +359,8 @@ define void @copysign_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %c)
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -375,8 +375,8 @@ define void @copysign_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %c)
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1384,8 +1384,8 @@ define void @fadd_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fadd <8 x half> %a, %c
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1400,8 +1400,8 @@ define void @fadd_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fadd <4 x float> %a, %c
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1416,8 +1416,8 @@ define void @fadd_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fadd <2 x double> %a, %c
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1432,8 +1432,8 @@ define void @fadd_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fadd <8 x half> %c, %a
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1448,8 +1448,8 @@ define void @fadd_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fadd <4 x float> %c, %a
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1464,8 +1464,8 @@ define void @fadd_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fadd <2 x double> %c, %a
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1480,8 +1480,8 @@ define void @fsub_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fsub <8 x half> %a, %c
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1496,8 +1496,8 @@ define void @fsub_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fsub <4 x float> %a, %c
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1512,8 +1512,8 @@ define void @fsub_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fsub <2 x double> %a, %c
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1528,8 +1528,8 @@ define void @fsub_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fsub <8 x half> %c, %a
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1544,8 +1544,8 @@ define void @fsub_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fsub <4 x float> %c, %a
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1560,8 +1560,8 @@ define void @fsub_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fsub <2 x double> %c, %a
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1576,8 +1576,8 @@ define void @fmul_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fmul <8 x half> %a, %c
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1592,8 +1592,8 @@ define void @fmul_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fmul <4 x float> %a, %c
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1608,8 +1608,8 @@ define void @fmul_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fmul <2 x double> %a, %c
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1624,8 +1624,8 @@ define void @fmul_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fmul <8 x half> %c, %a
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1640,8 +1640,8 @@ define void @fmul_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fmul <4 x float> %c, %a
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1656,8 +1656,8 @@ define void @fmul_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fmul <2 x double> %c, %a
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1672,8 +1672,8 @@ define void @fdiv_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fdiv <8 x half> %a, %c
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1688,8 +1688,8 @@ define void @fdiv_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fdiv <4 x float> %a, %c
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1704,8 +1704,8 @@ define void @fdiv_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fdiv <2 x double> %a, %c
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1720,8 +1720,8 @@ define void @fdiv_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
-  %b = insertelement <8 x half> undef, half %y, i32 0
-  %c = shufflevector <8 x half> %b, <8 x half> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x half> poison, half %y, i32 0
+  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
   %d = fdiv <8 x half> %c, %a
   store <8 x half> %d, <8 x half>* %x
   ret void
@@ -1736,8 +1736,8 @@ define void @fdiv_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
-  %b = insertelement <4 x float> undef, float %y, i32 0
-  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x float> poison, float %y, i32 0
+  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
   %d = fdiv <4 x float> %c, %a
   store <4 x float> %d, <4 x float>* %x
   ret void
@@ -1752,8 +1752,8 @@ define void @fdiv_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
-  %b = insertelement <2 x double> undef, double %y, i32 0
-  %c = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x double> poison, double %y, i32 0
+  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
   %d = fdiv <2 x double> %c, %a
   store <2 x double> %d, <2 x double>* %x
   ret void
@@ -1770,8 +1770,8 @@ define void @fma_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
-  %c = insertelement <8 x half> undef, half %z, i32 0
-  %d = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x half> poison, half %z, i32 0
+  %d = shufflevector <8 x half> %c, <8 x half> poison, <8 x i32> zeroinitializer
   %e = call <8 x half> @llvm.fma.v8f16(<8 x half> %a, <8 x half> %d, <8 x half> %b)
   store <8 x half> %e, <8 x half>* %x
   ret void
@@ -1788,8 +1788,8 @@ define void @fma_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = load <4 x float>, <4 x float>* %y
-  %c = insertelement <4 x float> undef, float %z, i32 0
-  %d = shufflevector <4 x float> %c, <4 x float> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x float> poison, float %z, i32 0
+  %d = shufflevector <4 x float> %c, <4 x float> poison, <4 x i32> zeroinitializer
   %e = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %d, <4 x float> %b)
   store <4 x float> %e, <4 x float>* %x
   ret void
@@ -1806,8 +1806,8 @@ define void @fma_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = load <2 x double>, <2 x double>* %y
-  %c = insertelement <2 x double> undef, double %z, i32 0
-  %d = shufflevector <2 x double> %c, <2 x double> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x double> poison, double %z, i32 0
+  %d = shufflevector <2 x double> %c, <2 x double> poison, <2 x i32> zeroinitializer
   %e = call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %d, <2 x double> %b)
   store <2 x double> %e, <2 x double>* %x
   ret void
@@ -1824,8 +1824,8 @@ define void @fma_fv_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
-  %c = insertelement <8 x half> undef, half %z, i32 0
-  %d = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x half> poison, half %z, i32 0
+  %d = shufflevector <8 x half> %c, <8 x half> poison, <8 x i32> zeroinitializer
   %e = call <8 x half> @llvm.fma.v8f16(<8 x half> %d, <8 x half> %a, <8 x half> %b)
   store <8 x half> %e, <8 x half>* %x
   ret void
@@ -1842,8 +1842,8 @@ define void @fma_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = load <4 x float>, <4 x float>* %y
-  %c = insertelement <4 x float> undef, float %z, i32 0
-  %d = shufflevector <4 x float> %c, <4 x float> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x float> poison, float %z, i32 0
+  %d = shufflevector <4 x float> %c, <4 x float> poison, <4 x i32> zeroinitializer
   %e = call <4 x float> @llvm.fma.v4f32(<4 x float> %d, <4 x float> %a, <4 x float> %b)
   store <4 x float> %e, <4 x float>* %x
   ret void
@@ -1860,8 +1860,8 @@ define void @fma_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = load <2 x double>, <2 x double>* %y
-  %c = insertelement <2 x double> undef, double %z, i32 0
-  %d = shufflevector <2 x double> %c, <2 x double> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x double> poison, double %z, i32 0
+  %d = shufflevector <2 x double> %c, <2 x double> poison, <2 x i32> zeroinitializer
   %e = call <2 x double> @llvm.fma.v2f64(<2 x double> %d, <2 x double> %a, <2 x double> %b)
   store <2 x double> %e, <2 x double>* %x
   ret void
@@ -1878,8 +1878,8 @@ define void @fmsub_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
-  %c = insertelement <8 x half> undef, half %z, i32 0
-  %d = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x half> poison, half %z, i32 0
+  %d = shufflevector <8 x half> %c, <8 x half> poison, <8 x i32> zeroinitializer
   %neg = fneg <8 x half> %b
   %e = call <8 x half> @llvm.fma.v8f16(<8 x half> %a, <8 x half> %d, <8 x half> %neg)
   store <8 x half> %e, <8 x half>* %x
@@ -1897,8 +1897,8 @@ define void @fnmsub_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = load <4 x float>, <4 x float>* %y
-  %c = insertelement <4 x float> undef, float %z, i32 0
-  %d = shufflevector <4 x float> %c, <4 x float> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x float> poison, float %z, i32 0
+  %d = shufflevector <4 x float> %c, <4 x float> poison, <4 x i32> zeroinitializer
   %neg = fneg <4 x float> %a
   %e = call <4 x float> @llvm.fma.v4f32(<4 x float> %neg, <4 x float> %d, <4 x float> %b)
   store <4 x float> %e, <4 x float>* %x
@@ -1916,8 +1916,8 @@ define void @fnmadd_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = load <2 x double>, <2 x double>* %y
-  %c = insertelement <2 x double> undef, double %z, i32 0
-  %d = shufflevector <2 x double> %c, <2 x double> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x double> poison, double %z, i32 0
+  %d = shufflevector <2 x double> %c, <2 x double> poison, <2 x i32> zeroinitializer
   %neg = fneg <2 x double> %a
   %neg2 = fneg <2 x double> %b
   %e = call <2 x double> @llvm.fma.v2f64(<2 x double> %neg, <2 x double> %d, <2 x double> %neg2)
@@ -1936,8 +1936,8 @@ define void @fnmsub_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = load <4 x float>, <4 x float>* %y
-  %c = insertelement <4 x float> undef, float %z, i32 0
-  %d = shufflevector <4 x float> %c, <4 x float> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x float> poison, float %z, i32 0
+  %d = shufflevector <4 x float> %c, <4 x float> poison, <4 x i32> zeroinitializer
   %neg = fneg <4 x float> %d
   %e = call <4 x float> @llvm.fma.v4f32(<4 x float> %neg, <4 x float> %a, <4 x float> %b)
   store <4 x float> %e, <4 x float>* %x
@@ -1955,8 +1955,8 @@ define void @fnmadd_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = load <2 x double>, <2 x double>* %y
-  %c = insertelement <2 x double> undef, double %z, i32 0
-  %d = shufflevector <2 x double> %c, <2 x double> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x double> poison, double %z, i32 0
+  %d = shufflevector <2 x double> %c, <2 x double> poison, <2 x i32> zeroinitializer
   %neg = fneg <2 x double> %d
   %neg2 = fneg <2 x double> %b
   %e = call <2 x double> @llvm.fma.v2f64(<2 x double> %neg, <2 x double> %a, <2 x double> %neg2)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
index 680ac37101c5..09701ffca0f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -187,8 +187,8 @@ define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp eq <16 x i8> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -204,8 +204,8 @@ define void @setne_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ne <32 x i8> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -221,8 +221,8 @@ define void @setgt_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp sgt <64 x i8> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -238,8 +238,8 @@ define void @setlt_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v16, (a2)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp slt <128 x i8> %a, %c
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -255,8 +255,8 @@ define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp sge <8 x i8> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -271,8 +271,8 @@ define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp sle <16 x i8> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -288,8 +288,8 @@ define void @setugt_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ugt <32 x i8> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -305,8 +305,8 @@ define void @setult_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp ult <64 x i8> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -323,8 +323,8 @@ define void @setuge_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v24, (a2)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp uge <128 x i8> %a, %c
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -339,8 +339,8 @@ define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp ule <8 x i8> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -355,8 +355,8 @@ define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp eq <16 x i8> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -372,8 +372,8 @@ define void @setne_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ne <32 x i8> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -389,8 +389,8 @@ define void @setgt_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp sgt <64 x i8> %c, %a
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -406,8 +406,8 @@ define void @setlt_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v16, (a2)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp slt <128 x i8> %c, %a
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -422,8 +422,8 @@ define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp sge <8 x i8> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -439,8 +439,8 @@ define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp sle <16 x i8> %c, %a
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -456,8 +456,8 @@ define void @setugt_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ugt <32 x i8> %c, %a
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -473,8 +473,8 @@ define void @setult_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp ult <64 x i8> %c, %a
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -490,8 +490,8 @@ define void @setuge_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v16, (a2)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp uge <128 x i8> %c, %a
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -507,8 +507,8 @@ define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp ule <8 x i8> %c, %a
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -523,8 +523,8 @@ define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 0, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 0, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp eq <16 x i8> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -540,8 +540,8 @@ define void @setne_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 0, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 0, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ne <32 x i8> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -557,8 +557,8 @@ define void @setgt_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 0, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 0, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp sgt <64 x i8> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -574,8 +574,8 @@ define void @setgt_vi_v64i8_nonzero(<64 x i8>* %x, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 5, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 5, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp sgt <64 x i8> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -591,8 +591,8 @@ define void @setlt_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 0, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 0, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp slt <128 x i8> %a, %c
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -607,8 +607,8 @@ define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 0, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 0, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp sge <8 x i8> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void
@@ -623,8 +623,8 @@ define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 0, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 0, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = icmp sle <16 x i8> %a, %c
   store <16 x i1> %d, <16 x i1>* %z
   ret void
@@ -640,8 +640,8 @@ define void @setugt_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 5, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 5, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = icmp ugt <32 x i8> %a, %c
   store <32 x i1> %d, <32 x i1>* %z
   ret void
@@ -657,8 +657,8 @@ define void @setult_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 5, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 5, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = icmp ult <64 x i8> %a, %c
   store <64 x i1> %d, <64 x i1>* %z
   ret void
@@ -674,8 +674,8 @@ define void @setuge_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, <128 x i8>* %x
-  %b = insertelement <128 x i8> undef, i8 5, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> undef, <128 x i32> zeroinitializer
+  %b = insertelement <128 x i8> poison, i8 5, i32 0
+  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
   %d = icmp uge <128 x i8> %a, %c
   store <128 x i1> %d, <128 x i1>* %z
   ret void
@@ -690,8 +690,8 @@ define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 5, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 5, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = icmp ule <8 x i8> %a, %c
   store <8 x i1> %d, <8 x i1>* %z
   ret void

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index b727a27d53a2..62d897220f90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -64,7 +64,7 @@ define <4 x i16> @vrgather_permute_shuffle_vu_v4i16(<4 x i16> %x) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %s = shufflevector <4 x i16> %x, <4 x i16> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
+  %s = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
   ret <4 x i16> %s
 }
 
@@ -78,7 +78,7 @@ define <4 x i16> @vrgather_permute_shuffle_uv_v4i16(<4 x i16> %x) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %s = shufflevector <4 x i16> undef, <4 x i16> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
+  %s = shufflevector <4 x i16> poison, <4 x i16> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
   ret <4 x i16> %s
 }
 
@@ -157,7 +157,7 @@ define <8 x i64> @vrgather_permute_shuffle_vu_v8i64(<8 x i64> %x) {
 ; RV64-NEXT:    vrgather.vv v12, v8, v16
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %s = shufflevector <8 x i64> %x, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 0, i32 1, i32 7, i32 6, i32 0, i32 1>
+  %s = shufflevector <8 x i64> %x, <8 x i64> poison, <8 x i32> <i32 1, i32 2, i32 0, i32 1, i32 7, i32 6, i32 0, i32 1>
   ret <8 x i64> %s
 }
 
@@ -182,7 +182,7 @@ define <8 x i64> @vrgather_permute_shuffle_uv_v8i64(<8 x i64> %x) {
 ; RV64-NEXT:    vrgather.vv v12, v8, v16
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %s = shufflevector <8 x i64> undef, <8 x i64> %x, <8 x i32> <i32 9, i32 10, i32 8, i32 9, i32 15, i32 8, i32 8, i32 11>
+  %s = shufflevector <8 x i64> poison, <8 x i64> %x, <8 x i32> <i32 9, i32 10, i32 8, i32 9, i32 15, i32 8, i32 8, i32 11>
   ret <8 x i64> %s
 }
 
@@ -322,8 +322,8 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %y = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
-  %z = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %y = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+  %z = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %w = shufflevector <4 x i8> %y, <4 x i8> %z, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   ret <4 x i8> %w
 }
@@ -531,7 +531,7 @@ define <8 x i8> @widen_splat_ve3(<4 x i8> %v) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 3
 ; CHECK-NEXT:    ret
-  %shuf = shufflevector <4 x i8> %v, <4 x i8> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %shuf = shufflevector <4 x i8> %v, <4 x i8> poison, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
   ret <8 x i8> %shuf
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index 194bf63ccc55..a3396c09312e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -13,8 +13,8 @@ define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i8> undef, i8 %y, i32 0
-  %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i8> poison, i8 %y, i32 0
+  %b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
   store <16 x i8> %b, <16 x i8>* %x
   ret void
 }
@@ -26,8 +26,8 @@ define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i16> undef, i16 %y, i32 0
-  %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i16> poison, i16 %y, i32 0
+  %b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
   store <8 x i16> %b, <8 x i16>* %x
   ret void
 }
@@ -39,8 +39,8 @@ define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i32> undef, i32 %y, i32 0
-  %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i32> poison, i32 %y, i32 0
+  %b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
   store <4 x i32> %b, <4 x i32>* %x
   ret void
 }
@@ -105,8 +105,8 @@ define void @splat_v2i64(<2 x i64>* %x, i64 %y) {
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v8, a1
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <2 x i64> undef, i64 %y, i32 0
-  %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
+  %a = insertelement <2 x i64> poison, i64 %y, i32 0
+  %b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
   store <2 x i64> %b, <2 x i64>* %x
   ret void
 }
@@ -136,8 +136,8 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 ; LMULMAX1-NEXT:    vse8.v v8, (a1)
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <32 x i8> undef, i8 %y, i32 0
-  %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
+  %a = insertelement <32 x i8> poison, i8 %y, i32 0
+  %b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
   store <32 x i8> %b, <32 x i8>* %x
   ret void
 }
@@ -165,8 +165,8 @@ define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 ; LMULMAX1-NEXT:    vse16.v v8, (a1)
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <16 x i16> undef, i16 %y, i32 0
-  %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i16> poison, i16 %y, i32 0
+  %b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
   store <16 x i16> %b, <16 x i16>* %x
   ret void
 }
@@ -194,8 +194,8 @@ define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
 ; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <8 x i32> undef, i32 %y, i32 0
-  %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i32> poison, i32 %y, i32 0
+  %b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
   store <8 x i32> %b, <8 x i32>* %x
   ret void
 }
@@ -262,8 +262,8 @@ define void @splat_v4i64(<4 x i64>* %x, i64 %y) {
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <4 x i64> undef, i64 %y, i32 0
-  %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i64> poison, i64 %y, i32 0
+  %b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
   store <4 x i64> %b, <4 x i64>* %x
   ret void
 }
@@ -275,8 +275,8 @@ define void @splat_zero_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i8> undef, i8 0, i32 0
-  %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i8> poison, i8 0, i32 0
+  %b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
   store <16 x i8> %b, <16 x i8>* %x
   ret void
 }
@@ -288,8 +288,8 @@ define void @splat_zero_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i16> undef, i16 0, i32 0
-  %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i16> poison, i16 0, i32 0
+  %b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
   store <8 x i16> %b, <8 x i16>* %x
   ret void
 }
@@ -301,8 +301,8 @@ define void @splat_zero_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i32> undef, i32 0, i32 0
-  %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i32> poison, i32 0, i32 0
+  %b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
   store <4 x i32> %b, <4 x i32>* %x
   ret void
 }
@@ -314,8 +314,8 @@ define void @splat_zero_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i64> undef, i64 0, i32 0
-  %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
+  %a = insertelement <2 x i64> poison, i64 0, i32 0
+  %b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
   store <2 x i64> %b, <2 x i64>* %x
   ret void
 }
@@ -345,8 +345,8 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <32 x i8> undef, i8 0, i32 0
-  %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
+  %a = insertelement <32 x i8> poison, i8 0, i32 0
+  %b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
   store <32 x i8> %b, <32 x i8>* %x
   ret void
 }
@@ -374,8 +374,8 @@ define void @splat_zero_v16i16(<16 x i16>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <16 x i16> undef, i16 0, i32 0
-  %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i16> poison, i16 0, i32 0
+  %b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
   store <16 x i16> %b, <16 x i16>* %x
   ret void
 }
@@ -403,8 +403,8 @@ define void @splat_zero_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <8 x i32> undef, i32 0, i32 0
-  %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i32> poison, i32 0, i32 0
+  %b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
   store <8 x i32> %b, <8 x i32>* %x
   ret void
 }
@@ -441,8 +441,8 @@ define void @splat_zero_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a0, a0, 16
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <4 x i64> undef, i64 0, i32 0
-  %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i64> poison, i64 0, i32 0
+  %b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
   store <4 x i64> %b, <4 x i64>* %x
   ret void
 }
@@ -454,8 +454,8 @@ define void @splat_allones_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i8> undef, i8 -1, i32 0
-  %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i8> poison, i8 -1, i32 0
+  %b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
   store <16 x i8> %b, <16 x i8>* %x
   ret void
 }
@@ -467,8 +467,8 @@ define void @splat_allones_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i16> undef, i16 -1, i32 0
-  %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i16> poison, i16 -1, i32 0
+  %b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
   store <8 x i16> %b, <8 x i16>* %x
   ret void
 }
@@ -480,8 +480,8 @@ define void @splat_allones_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i32> undef, i32 -1, i32 0
-  %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i32> poison, i32 -1, i32 0
+  %b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
   store <4 x i32> %b, <4 x i32>* %x
   ret void
 }
@@ -493,8 +493,8 @@ define void @splat_allones_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i64> undef, i64 -1, i32 0
-  %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
+  %a = insertelement <2 x i64> poison, i64 -1, i32 0
+  %b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
   store <2 x i64> %b, <2 x i64>* %x
   ret void
 }
@@ -524,8 +524,8 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <32 x i8> undef, i8 -1, i32 0
-  %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
+  %a = insertelement <32 x i8> poison, i8 -1, i32 0
+  %b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
   store <32 x i8> %b, <32 x i8>* %x
   ret void
 }
@@ -553,8 +553,8 @@ define void @splat_allones_v16i16(<16 x i16>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <16 x i16> undef, i16 -1, i32 0
-  %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i16> poison, i16 -1, i32 0
+  %b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
   store <16 x i16> %b, <16 x i16>* %x
   ret void
 }
@@ -582,8 +582,8 @@ define void @splat_allones_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
-  %a = insertelement <8 x i32> undef, i32 -1, i32 0
-  %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i32> poison, i32 -1, i32 0
+  %b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
   store <8 x i32> %b, <8 x i32>* %x
   ret void
 }
@@ -620,8 +620,8 @@ define void @splat_allones_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a0, a0, 16
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <4 x i64> undef, i64 -1, i32 0
-  %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i64> poison, i64 -1, i32 0
+  %b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
   store <4 x i64> %b, <4 x i64>* %x
   ret void
 }
@@ -853,8 +853,8 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV64-NEXT:    vse64.v v13, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %va = load <16 x i64>, <16 x i64>* %a
-  %head = insertelement <16 x i64> undef, i64 %b, i32 0
-  %splat = shufflevector <16 x i64> %head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <16 x i64> %head, <16 x i64> poison, <16 x i32> zeroinitializer
   %vc = add <16 x i64> %va, %splat
   store <16 x i64> %vc, <16 x i64>* %c
   ret void

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index 576de147329c..c2637891d42b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -14,8 +14,8 @@ define void @gather_const_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = extractelement <16 x i8> %a, i32 12
-  %c = insertelement <16 x i8> undef, i8 %b, i32 0
-  %d = shufflevector <16 x i8> %c, <16 x i8> undef, <16 x i32> zeroinitializer
+  %c = insertelement <16 x i8> poison, i8 %b, i32 0
+  %d = shufflevector <16 x i8> %c, <16 x i8> poison, <16 x i32> zeroinitializer
   store <16 x i8> %d, <16 x i8>* %x
   ret void
 }
@@ -30,8 +30,8 @@ define void @gather_const_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %b = extractelement <8 x i16> %a, i32 5
-  %c = insertelement <8 x i16> undef, i16 %b, i32 0
-  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x i16> poison, i16 %b, i32 0
+  %d = shufflevector <8 x i16> %c, <8 x i16> poison, <8 x i32> zeroinitializer
   store <8 x i16> %d, <8 x i16>* %x
   ret void
 }
@@ -46,8 +46,8 @@ define void @gather_const_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
   %b = extractelement <4 x i32> %a, i32 3
-  %c = insertelement <4 x i32> undef, i32 %b, i32 0
-  %d = shufflevector <4 x i32> %c, <4 x i32> undef, <4 x i32> zeroinitializer
+  %c = insertelement <4 x i32> poison, i32 %b, i32 0
+  %d = shufflevector <4 x i32> %c, <4 x i32> poison, <4 x i32> zeroinitializer
   store <4 x i32> %d, <4 x i32>* %x
   ret void
 }
@@ -62,8 +62,8 @@ define void @gather_const_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
   %b = extractelement <2 x i64> %a, i32 1
-  %c = insertelement <2 x i64> undef, i64 %b, i32 0
-  %d = shufflevector <2 x i64> %c, <2 x i64> undef, <2 x i32> zeroinitializer
+  %c = insertelement <2 x i64> poison, i64 %b, i32 0
+  %d = shufflevector <2 x i64> %c, <2 x i64> poison, <2 x i32> zeroinitializer
   store <2 x i64> %d, <2 x i64>* %x
   ret void
 }
@@ -92,8 +92,8 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
   %b = extractelement <64 x i8> %a, i32 32
-  %c = insertelement <64 x i8> undef, i8 %b, i32 0
-  %d = shufflevector <64 x i8> %c, <64 x i8> undef, <64 x i32> zeroinitializer
+  %c = insertelement <64 x i8> poison, i8 %b, i32 0
+  %d = shufflevector <64 x i8> %c, <64 x i8> poison, <64 x i32> zeroinitializer
   store <64 x i8> %d, <64 x i8>* %x
   ret void
 }
@@ -123,8 +123,8 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
   %b = extractelement <32 x i16> %a, i32 25
-  %c = insertelement <32 x i16> undef, i16 %b, i32 0
-  %d = shufflevector <32 x i16> %c, <32 x i16> undef, <32 x i32> zeroinitializer
+  %c = insertelement <32 x i16> poison, i16 %b, i32 0
+  %d = shufflevector <32 x i16> %c, <32 x i16> poison, <32 x i32> zeroinitializer
   store <32 x i16> %d, <32 x i16>* %x
   ret void
 }
@@ -153,8 +153,8 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
   %b = extractelement <16 x i32> %a, i32 9
-  %c = insertelement <16 x i32> undef, i32 %b, i32 0
-  %d = shufflevector <16 x i32> %c, <16 x i32> undef, <16 x i32> zeroinitializer
+  %c = insertelement <16 x i32> poison, i32 %b, i32 0
+  %d = shufflevector <16 x i32> %c, <16 x i32> poison, <16 x i32> zeroinitializer
   store <16 x i32> %d, <16 x i32>* %x
   ret void
 }
@@ -183,8 +183,8 @@ define void @gather_const_v8i64(<8 x i64>* %x) {
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = extractelement <8 x i64> %a, i32 3
-  %c = insertelement <8 x i64> undef, i64 %b, i32 0
-  %d = shufflevector <8 x i64> %c, <8 x i64> undef, <8 x i32> zeroinitializer
+  %c = insertelement <8 x i64> poison, i64 %b, i32 0
+  %d = shufflevector <8 x i64> %c, <8 x i64> poison, <8 x i32> zeroinitializer
   store <8 x i64> %d, <8 x i64>* %x
   ret void
 }
@@ -200,7 +200,7 @@ define void @splat_concat_low(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load <4 x i16>, <4 x i16>* %y
   %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %d = shufflevector <8 x i16> %c, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   store <8 x i16> %d, <8 x i16>* %z
   ret void
 }
@@ -216,7 +216,7 @@ define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load <4 x i16>, <4 x i16>* %y
   %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  %d = shufflevector <8 x i16> %c, <8 x i16> poison, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   store <8 x i16> %d, <8 x i16>* %z
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 2b3138cc76e7..a0e494fff477 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -5501,8 +5501,8 @@ define void @add_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 -1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = add <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5517,8 +5517,8 @@ define void @add_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 -1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = add <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5533,8 +5533,8 @@ define void @add_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 -1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = add <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5549,8 +5549,8 @@ define void @add_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = add <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -5565,8 +5565,8 @@ define void @add_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = add <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5581,8 +5581,8 @@ define void @add_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = add <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5597,8 +5597,8 @@ define void @add_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = add <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5613,8 +5613,8 @@ define void @add_iv_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = add <2 x i64> %c, %a
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -5629,8 +5629,8 @@ define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = add <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5645,8 +5645,8 @@ define void @add_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = add <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5661,8 +5661,8 @@ define void @add_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = add <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5677,8 +5677,8 @@ define void @add_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = add <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5693,8 +5693,8 @@ define void @add_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = add <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5709,8 +5709,8 @@ define void @add_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = add <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5726,8 +5726,8 @@ define void @sub_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 -1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sub <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5743,8 +5743,8 @@ define void @sub_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 -1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sub <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5760,8 +5760,8 @@ define void @sub_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 -1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sub <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5777,8 +5777,8 @@ define void @sub_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = sub <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -5793,8 +5793,8 @@ define void @sub_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sub <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5809,8 +5809,8 @@ define void @sub_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sub <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5825,8 +5825,8 @@ define void @sub_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sub <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5841,8 +5841,8 @@ define void @sub_iv_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = sub <2 x i64> %c, %a
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -5857,8 +5857,8 @@ define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sub <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5873,8 +5873,8 @@ define void @sub_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sub <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5889,8 +5889,8 @@ define void @sub_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sub <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5905,8 +5905,8 @@ define void @sub_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sub <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5921,8 +5921,8 @@ define void @sub_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sub <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5937,8 +5937,8 @@ define void @sub_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sub <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -5953,8 +5953,8 @@ define void @mul_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = mul <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -5969,8 +5969,8 @@ define void @mul_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = mul <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -5985,8 +5985,8 @@ define void @mul_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = mul <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6001,8 +6001,8 @@ define void @mul_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = mul <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6017,8 +6017,8 @@ define void @mul_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = mul <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6033,8 +6033,8 @@ define void @mul_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = mul <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6049,8 +6049,8 @@ define void @and_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 -2, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 -2, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = and <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6065,8 +6065,8 @@ define void @and_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 -2, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 -2, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = and <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6081,8 +6081,8 @@ define void @and_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 -2, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 -2, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = and <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6097,8 +6097,8 @@ define void @and_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 -2, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 -2, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = and <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6113,8 +6113,8 @@ define void @and_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = and <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6129,8 +6129,8 @@ define void @and_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = and <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6145,8 +6145,8 @@ define void @and_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = and <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6161,8 +6161,8 @@ define void @and_iv_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = and <2 x i64> %c, %a
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6177,8 +6177,8 @@ define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = and <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6193,8 +6193,8 @@ define void @and_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = and <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6209,8 +6209,8 @@ define void @and_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = and <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6225,8 +6225,8 @@ define void @and_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = and <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6241,8 +6241,8 @@ define void @and_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = and <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6257,8 +6257,8 @@ define void @and_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = and <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6273,8 +6273,8 @@ define void @or_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 -2, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 -2, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = or <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6289,8 +6289,8 @@ define void @or_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 -2, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 -2, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = or <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6305,8 +6305,8 @@ define void @or_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 -2, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 -2, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = or <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6321,8 +6321,8 @@ define void @or_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 -2, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 -2, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = or <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6337,8 +6337,8 @@ define void @or_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = or <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6353,8 +6353,8 @@ define void @or_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = or <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6369,8 +6369,8 @@ define void @or_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = or <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6385,8 +6385,8 @@ define void @or_iv_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = or <2 x i64> %c, %a
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6401,8 +6401,8 @@ define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = or <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6417,8 +6417,8 @@ define void @or_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = or <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6433,8 +6433,8 @@ define void @or_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = or <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6449,8 +6449,8 @@ define void @or_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = or <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6465,8 +6465,8 @@ define void @or_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = or <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6481,8 +6481,8 @@ define void @or_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = or <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6497,8 +6497,8 @@ define void @xor_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 -1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = xor <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6513,8 +6513,8 @@ define void @xor_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 -1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = xor <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6529,8 +6529,8 @@ define void @xor_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 -1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = xor <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6545,8 +6545,8 @@ define void @xor_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = xor <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6561,8 +6561,8 @@ define void @xor_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 1, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = xor <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6577,8 +6577,8 @@ define void @xor_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 1, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = xor <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6593,8 +6593,8 @@ define void @xor_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 1, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = xor <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6609,8 +6609,8 @@ define void @xor_iv_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = xor <2 x i64> %c, %a
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6625,8 +6625,8 @@ define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = xor <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6641,8 +6641,8 @@ define void @xor_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = xor <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6657,8 +6657,8 @@ define void @xor_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = xor <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6673,8 +6673,8 @@ define void @xor_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = xor <16 x i8> %c, %a
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6689,8 +6689,8 @@ define void @xor_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = xor <8 x i16> %c, %a
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6705,8 +6705,8 @@ define void @xor_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = xor <4 x i32> %c, %a
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6721,8 +6721,8 @@ define void @lshr_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 7, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = lshr <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6737,8 +6737,8 @@ define void @lshr_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 15, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = lshr <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6753,8 +6753,8 @@ define void @lshr_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 31, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = lshr <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6769,8 +6769,8 @@ define void @lshr_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = lshr <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6785,8 +6785,8 @@ define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = lshr <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6801,8 +6801,8 @@ define void @lshr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = lshr <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6817,8 +6817,8 @@ define void @lshr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = lshr <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6833,8 +6833,8 @@ define void @ashr_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 7, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = ashr <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6849,8 +6849,8 @@ define void @ashr_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 15, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = ashr <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6865,8 +6865,8 @@ define void @ashr_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 31, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = ashr <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6881,8 +6881,8 @@ define void @ashr_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = ashr <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -6897,8 +6897,8 @@ define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = ashr <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6913,8 +6913,8 @@ define void @ashr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = ashr <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6929,8 +6929,8 @@ define void @ashr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = ashr <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6945,8 +6945,8 @@ define void @shl_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 7, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = shl <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -6961,8 +6961,8 @@ define void @shl_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 15, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = shl <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -6977,8 +6977,8 @@ define void @shl_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 31, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = shl <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -6993,8 +6993,8 @@ define void @shl_vi_v2i64(<2 x i64>* %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
-  %b = insertelement <2 x i64> undef, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i64> poison, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
   %d = shl <2 x i64> %a, %c
   store <2 x i64> %d, <2 x i64>* %x
   ret void
@@ -7009,8 +7009,8 @@ define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = shl <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -7025,8 +7025,8 @@ define void @shl_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = shl <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -7041,8 +7041,8 @@ define void @shl_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = shl <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -7057,8 +7057,8 @@ define void @sdiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sdiv <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -7073,8 +7073,8 @@ define void @sdiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sdiv <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -7089,8 +7089,8 @@ define void @sdiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sdiv <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -7105,8 +7105,8 @@ define void @srem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = srem <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -7121,8 +7121,8 @@ define void @srem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = srem <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -7137,8 +7137,8 @@ define void @srem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = srem <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -7153,8 +7153,8 @@ define void @udiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = udiv <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -7169,8 +7169,8 @@ define void @udiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = udiv <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -7185,8 +7185,8 @@ define void @udiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = udiv <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void
@@ -7201,8 +7201,8 @@ define void @urem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = urem <16 x i8> %a, %c
   store <16 x i8> %d, <16 x i8>* %x
   ret void
@@ -7217,8 +7217,8 @@ define void @urem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = urem <8 x i16> %a, %c
   store <8 x i16> %d, <8 x i16>* %x
   ret void
@@ -7233,8 +7233,8 @@ define void @urem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i32 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = urem <4 x i32> %a, %c
   store <4 x i32> %d, <4 x i32>* %x
   ret void

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 6dffc1bd6fd4..875eff21ea7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -71,7 +71,7 @@ define <1 x i1> @buildvec_mask_nonconst_v1i1(i1 %x) {
 ; RV64-ELEN8-NEXT:    vmv.v.x v8, a0
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <1 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <1 x i1> poison, i1 %x, i32 0
   ret <1 x i1> %1
 }
 
@@ -131,7 +131,7 @@ define <1 x i1> @buildvec_mask_optsize_nonconst_v1i1(i1 %x) optsize {
 ; RV64-ELEN8-NEXT:    vmv.v.x v8, a0
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <1 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <1 x i1> poison, i1 %x, i32 0
   ret <1 x i1> %1
 }
 
@@ -212,7 +212,7 @@ define <2 x i1> @buildvec_mask_nonconst_v2i1(i1 %x, i1 %y) {
 ; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <2 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <2 x i1> poison, i1 %x, i32 0
   %2 = insertelement <2 x i1> %1,  i1 %y, i32 1
   ret <2 x i1> %2
 }
@@ -316,7 +316,7 @@ define <2 x i1> @buildvec_mask_optsize_nonconst_v2i1(i1 %x, i1 %y) optsize {
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <2 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <2 x i1> poison, i1 %x, i32 0
   %2 = insertelement <2 x i1> %1,  i1 %y, i32 1
   ret <2 x i1> %2
 }
@@ -561,7 +561,7 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) {
 ; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <4 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <4 x i1> poison, i1 %x, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1 %y, i32 2
   %4 = insertelement <4 x i1> %3,  i1 %y, i32 3
@@ -681,7 +681,7 @@ define <4 x i1> @buildvec_mask_optsize_nonconst_v4i1(i1 %x, i1 %y) optsize {
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <4 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <4 x i1> poison, i1 %x, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1 %y, i32 2
   %4 = insertelement <4 x i1> %3,  i1 %y, i32 3
@@ -807,7 +807,7 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1_2(i1 %x, i1 %y) {
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <4 x i1> undef, i1 0, i32 0
+  %1 = insertelement <4 x i1> poison, i1 0, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1  1, i32 2
   %4 = insertelement <4 x i1> %3,  i1 %y, i32 3
@@ -950,7 +950,7 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
 ; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <8 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <8 x i1> poison, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1 %y, i32 2
   %4 = insertelement <8 x i1> %3,  i1 %y, i32 3
@@ -1108,7 +1108,7 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <8 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <8 x i1> poison, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1  1, i32 2
   %4 = insertelement <8 x i1> %3,  i1 %y, i32 3
@@ -1266,7 +1266,7 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <8 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <8 x i1> poison, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1  1, i32 2
   %4 = insertelement <8 x i1> %3,  i1 %y, i32 3
@@ -1417,7 +1417,7 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
 ; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-ELEN8-NEXT:    addi sp, sp, 16
 ; RV64-ELEN8-NEXT:    ret
-  %1 = insertelement <8 x i1> undef, i1 %x, i32 0
+  %1 = insertelement <8 x i1> poison, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1 %y, i32 2
   %4 = insertelement <8 x i1> %3,  i1 %y, i32 3

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 21cb21cae767..bfe695c553b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -59,8 +59,8 @@ define void @splat_v1i1(<1 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <1 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <1 x i1> %a, <1 x i1> undef, <1 x i32> zeroinitializer
+  %a = insertelement <1 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <1 x i1> %a, <1 x i1> poison, <1 x i32> zeroinitializer
   store <1 x i1> %b, <1 x i1>* %x
   ret void
 }
@@ -84,8 +84,8 @@ define void @splat_v1i1_icmp(<1 x i1>* %x, i32 signext %y, i32 signext %z) {
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
   %c = icmp eq i32 %y, %z
-  %a = insertelement <1 x i1> undef, i1 %c, i32 0
-  %b = shufflevector <1 x i1> %a, <1 x i1> undef, <1 x i32> zeroinitializer
+  %a = insertelement <1 x i1> poison, i1 %c, i32 0
+  %b = shufflevector <1 x i1> %a, <1 x i1> poison, <1 x i32> zeroinitializer
   store <1 x i1> %b, <1 x i1>* %x
   ret void
 }
@@ -126,8 +126,8 @@ define void @splat_v4i1(<4 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> zeroinitializer
+  %a = insertelement <4 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
   store <4 x i1> %b, <4 x i1>* %x
   ret void
 }
@@ -152,8 +152,8 @@ define void @splat_v8i1(<8 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmsne.vi v8, v8, 0
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
   store <8 x i1> %b, <8 x i1>* %x
   ret void
 }
@@ -178,8 +178,8 @@ define void @splat_v16i1(<16 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmsne.vi v8, v8, 0
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <16 x i1> %a, <16 x i1> poison, <16 x i32> zeroinitializer
   store <16 x i1> %b, <16 x i1>* %x
   ret void
 }
@@ -246,8 +246,8 @@ define void @splat_v32i1(<32 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV64-NEXT:    vsm.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vsm.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <32 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer
+  %a = insertelement <32 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <32 x i1> %a, <32 x i1> poison, <32 x i32> zeroinitializer
   store <32 x i1> %b, <32 x i1>* %x
   ret void
 }
@@ -334,8 +334,8 @@ define void @splat_v64i1(<64 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV64-NEXT:    vsm.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vsm.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
-  %a = insertelement <64 x i1> undef, i1 %y, i32 0
-  %b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer
+  %a = insertelement <64 x i1> poison, i1 %y, i32 0
+  %b = shufflevector <64 x i1> %a, <64 x i1> poison, <64 x i32> zeroinitializer
   store <64 x i1> %b, <64 x i1>* %x
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 9cd6b26dc151..05202fc4968f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -204,8 +204,8 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x i8*> %ptrs, <4 x i8> %passthru) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> %mtrue, <4 x i8> %passthru)
   ret <4 x i8> %v
 }
@@ -425,8 +425,8 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x i16*> %ptrs, <4 x i16> %passthru)
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> %mtrue, <4 x i16> %passthru)
   ret <4 x i16> %v
 }
@@ -682,8 +682,8 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x i32*> %ptrs, <4 x i32> %passthru)
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %mtrue, <4 x i32> %passthru)
   ret <4 x i32> %v
 }
@@ -966,8 +966,8 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x i64*> %ptrs, <4 x i64> %passthru)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, i32 8, <4 x i1> %mtrue, <4 x i64> %passthru)
   ret <4 x i64> %v
 }
@@ -1337,8 +1337,8 @@ define <4 x half> @mgather_truemask_v4f16(<4 x half*> %ptrs, <4 x half> %passthr
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> %ptrs, i32 2, <4 x i1> %mtrue, <4 x half> %passthru)
   ret <4 x half> %v
 }
@@ -1552,8 +1552,8 @@ define <4 x float> @mgather_truemask_v4f32(<4 x float*> %ptrs, <4 x float> %pass
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> %mtrue, <4 x float> %passthru)
   ret <4 x float> %v
 }
@@ -1836,8 +1836,8 @@ define <4 x double> @mgather_truemask_v4f64(<4 x double*> %ptrs, <4 x double> %p
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 8, <4 x i1> %mtrue, <4 x double> %passthru)
   ret <4 x double> %v
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 66cba4d3f755..0fd0c6869cd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -139,8 +139,8 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> %mtrue)
   ret void
 }
@@ -304,8 +304,8 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 2, <4 x i1> %mtrue)
   ret void
 }
@@ -519,8 +519,8 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 4, <4 x i1> %mtrue)
   ret void
 }
@@ -778,8 +778,8 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 8, <4 x i1> %mtrue)
   ret void
 }
@@ -1117,8 +1117,8 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, i32 2, <4 x i1> %mtrue)
   ret void
 }
@@ -1313,8 +1313,8 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, i32 4, <4 x i1> %mtrue)
   ret void
 }
@@ -1572,8 +1572,8 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
   call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, i32 8, <4 x i1> %mtrue)
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
index b7d76815e7d3..2ab1b6c35c8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
@@ -22,8 +22,8 @@ define <2 x half> @vfmax_v2f16_vf(<2 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x half> undef, half %b, i32 0
-  %splat = shufflevector <2 x half> %head, <2 x half> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
   %v = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %splat)
   ret <2 x half> %v
 }
@@ -46,8 +46,8 @@ define <4 x half> @vfmax_v4f16_vf(<4 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x half> undef, half %b, i32 0
-  %splat = shufflevector <4 x half> %head, <4 x half> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
   %v = call <4 x half> @llvm.maxnum.v4f16(<4 x half> %a, <4 x half> %splat)
   ret <4 x half> %v
 }
@@ -70,8 +70,8 @@ define <8 x half> @vfmax_v8f16_vf(<8 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x half> undef, half %b, i32 0
-  %splat = shufflevector <8 x half> %head, <8 x half> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
   %v = call <8 x half> @llvm.maxnum.v8f16(<8 x half> %a, <8 x half> %splat)
   ret <8 x half> %v
 }
@@ -94,8 +94,8 @@ define <16 x half> @vfmax_v16f16_vf(<16 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x half> undef, half %b, i32 0
-  %splat = shufflevector <16 x half> %head, <16 x half> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
   %v = call <16 x half> @llvm.maxnum.v16f16(<16 x half> %a, <16 x half> %splat)
   ret <16 x half> %v
 }
@@ -118,8 +118,8 @@ define <2 x float> @vfmax_v2f32_vf(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x float> undef, float %b, i32 0
-  %splat = shufflevector <2 x float> %head, <2 x float> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %splat)
   ret <2 x float> %v
 }
@@ -142,8 +142,8 @@ define <4 x float> @vfmax_v4f32_vf(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x float> undef, float %b, i32 0
-  %splat = shufflevector <4 x float> %head, <4 x float> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %splat)
   ret <4 x float> %v
 }
@@ -166,8 +166,8 @@ define <8 x float> @vfmax_v8f32_vf(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x float> undef, float %b, i32 0
-  %splat = shufflevector <8 x float> %head, <8 x float> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %a, <8 x float> %splat)
   ret <8 x float> %v
 }
@@ -190,8 +190,8 @@ define <16 x float> @vfmax_v16f32_vf(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x float> undef, float %b, i32 0
-  %splat = shufflevector <16 x float> %head, <16 x float> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %splat)
   ret <16 x float> %v
 }
@@ -214,8 +214,8 @@ define <2 x double> @vfmax_v2f64_vf(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x double> undef, double %b, i32 0
-  %splat = shufflevector <2 x double> %head, <2 x double> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %splat)
   ret <2 x double> %v
 }
@@ -238,8 +238,8 @@ define <4 x double> @vfmax_v4f64_vf(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x double> undef, double %b, i32 0
-  %splat = shufflevector <4 x double> %head, <4 x double> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %splat)
   ret <4 x double> %v
 }
@@ -262,8 +262,8 @@ define <8 x double> @vfmax_v8f64_vf(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x double> undef, double %b, i32 0
-  %splat = shufflevector <8 x double> %head, <8 x double> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %splat)
   ret <8 x double> %v
 }
@@ -286,8 +286,8 @@ define <16 x double> @vfmax_v16f64_vf(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x double> undef, double %b, i32 0
-  %splat = shufflevector <16 x double> %head, <16 x double> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x double> poison, double %b, i32 0
+  %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %splat)
   ret <16 x double> %v
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
index dfdfd8b42010..e57df75afcd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
@@ -22,8 +22,8 @@ define <2 x half> @vfmin_v2f16_vf(<2 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x half> undef, half %b, i32 0
-  %splat = shufflevector <2 x half> %head, <2 x half> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
   %v = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %splat)
   ret <2 x half> %v
 }
@@ -46,8 +46,8 @@ define <4 x half> @vfmin_v4f16_vf(<4 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x half> undef, half %b, i32 0
-  %splat = shufflevector <4 x half> %head, <4 x half> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
   %v = call <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %splat)
   ret <4 x half> %v
 }
@@ -70,8 +70,8 @@ define <8 x half> @vfmin_v8f16_vf(<8 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x half> undef, half %b, i32 0
-  %splat = shufflevector <8 x half> %head, <8 x half> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
   %v = call <8 x half> @llvm.minnum.v8f16(<8 x half> %a, <8 x half> %splat)
   ret <8 x half> %v
 }
@@ -94,8 +94,8 @@ define <16 x half> @vfmin_v16f16_vf(<16 x half> %a, half %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x half> undef, half %b, i32 0
-  %splat = shufflevector <16 x half> %head, <16 x half> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
   %v = call <16 x half> @llvm.minnum.v16f16(<16 x half> %a, <16 x half> %splat)
   ret <16 x half> %v
 }
@@ -118,8 +118,8 @@ define <2 x float> @vfmin_v2f32_vf(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x float> undef, float %b, i32 0
-  %splat = shufflevector <2 x float> %head, <2 x float> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %splat)
   ret <2 x float> %v
 }
@@ -142,8 +142,8 @@ define <4 x float> @vfmin_v4f32_vf(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x float> undef, float %b, i32 0
-  %splat = shufflevector <4 x float> %head, <4 x float> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %splat)
   ret <4 x float> %v
 }
@@ -166,8 +166,8 @@ define <8 x float> @vfmin_v8f32_vf(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x float> undef, float %b, i32 0
-  %splat = shufflevector <8 x float> %head, <8 x float> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %splat)
   ret <8 x float> %v
 }
@@ -190,8 +190,8 @@ define <16 x float> @vfmin_v16f32_vf(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x float> undef, float %b, i32 0
-  %splat = shufflevector <16 x float> %head, <16 x float> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %splat)
   ret <16 x float> %v
 }
@@ -214,8 +214,8 @@ define <2 x double> @vfmin_v2f64_vf(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x double> undef, double %b, i32 0
-  %splat = shufflevector <2 x double> %head, <2 x double> undef, <2 x i32> zeroinitializer
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %splat)
   ret <2 x double> %v
 }
@@ -238,8 +238,8 @@ define <4 x double> @vfmin_v4f64_vf(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x double> undef, double %b, i32 0
-  %splat = shufflevector <4 x double> %head, <4 x double> undef, <4 x i32> zeroinitializer
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %splat)
   ret <4 x double> %v
 }
@@ -262,8 +262,8 @@ define <8 x double> @vfmin_v8f64_vf(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x double> undef, double %b, i32 0
-  %splat = shufflevector <8 x double> %head, <8 x double> undef, <8 x i32> zeroinitializer
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %splat)
   ret <8 x double> %v
 }
@@ -286,8 +286,8 @@ define <16 x double> @vfmin_v16f64_vf(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x double> undef, double %b, i32 0
-  %splat = shufflevector <16 x double> %head, <16 x double> undef, <16 x i32> zeroinitializer
+  %head = insertelement <16 x double> poison, double %b, i32 0
+  %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %splat)
   ret <16 x double> %v
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
index 84f03cc78e60..68f553d0d8fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll
@@ -8,8 +8,8 @@ define <8 x i8> @vnsra_v8i16_v8i8_scalar(<8 x i16> %x, i16 %y) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnsra.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %insert = insertelement <8 x i16> undef, i16 %y, i16 0
-  %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
+  %insert = insertelement <8 x i16> poison, i16 %y, i16 0
+  %splat = shufflevector <8 x i16> %insert, <8 x i16> poison, <8 x i32> zeroinitializer
   %a = ashr <8 x i16> %x, %splat
   %b = trunc <8 x i16> %a to <8 x i8>
   ret <8 x i8> %b
@@ -21,8 +21,8 @@ define <4 x i16> @vnsra_v4i32_v4i16_scalar(<4 x i32> %x, i32 %y) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnsra.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %insert = insertelement <4 x i32> undef, i32 %y, i32 0
-  %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %insert = insertelement <4 x i32> poison, i32 %y, i32 0
+  %splat = shufflevector <4 x i32> %insert, <4 x i32> poison, <4 x i32> zeroinitializer
   %a = ashr <4 x i32> %x, %splat
   %b = trunc <4 x i32> %a to <4 x i16>
   ret <4 x i16> %b
@@ -54,8 +54,8 @@ define <2 x i32> @vnsra_v2i64_v2i32_scalar(<2 x i64> %x, i64 %y) {
 ; RV64-NEXT:    vnsra.wx v25, v8, a0
 ; RV64-NEXT:    vmv1r.v v8, v25
 ; RV64-NEXT:    ret
-  %insert = insertelement <2 x i64> undef, i64 %y, i32 0
-  %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
+  %insert = insertelement <2 x i64> poison, i64 %y, i32 0
+  %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
   %a = ashr <2 x i64> %x, %splat
   %b = trunc <2 x i64> %a to <2 x i32>
   ret <2 x i32> %b
@@ -100,8 +100,8 @@ define <8 x i8> @vnsrl_v8i16_v8i8_scalar(<8 x i16> %x, i16 %y) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnsrl.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %insert = insertelement <8 x i16> undef, i16 %y, i16 0
-  %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
+  %insert = insertelement <8 x i16> poison, i16 %y, i16 0
+  %splat = shufflevector <8 x i16> %insert, <8 x i16> poison, <8 x i32> zeroinitializer
   %a = lshr <8 x i16> %x, %splat
   %b = trunc <8 x i16> %a to <8 x i8>
   ret <8 x i8> %b
@@ -113,8 +113,8 @@ define <4 x i16> @vnsrl_v4i32_v4i16_scalar(<4 x i32> %x, i32 %y) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnsrl.wx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %insert = insertelement <4 x i32> undef, i32 %y, i32 0
-  %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %insert = insertelement <4 x i32> poison, i32 %y, i32 0
+  %splat = shufflevector <4 x i32> %insert, <4 x i32> poison, <4 x i32> zeroinitializer
   %a = lshr <4 x i32> %x, %splat
   %b = trunc <4 x i32> %a to <4 x i16>
   ret <4 x i16> %b
@@ -146,8 +146,8 @@ define <2 x i32> @vnsrl_v2i64_v2i32_scalar(<2 x i64> %x, i64 %y) {
 ; RV64-NEXT:    vnsrl.wx v25, v8, a0
 ; RV64-NEXT:    vmv1r.v v8, v25
 ; RV64-NEXT:    ret
-  %insert = insertelement <2 x i64> undef, i64 %y, i32 0
-  %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
+  %insert = insertelement <2 x i64> poison, i64 %y, i32 0
+  %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
   %a = lshr <2 x i64> %x, %splat
   %b = trunc <2 x i64> %a to <2 x i32>
   ret <2 x i32> %b

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
index dac996a26259..b7c6c992ddd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
@@ -22,8 +22,8 @@ define <2 x i8> @sadd_v2i8_vx(<2 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -34,8 +34,8 @@ define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -58,8 +58,8 @@ define <4 x i8> @sadd_v4i8_vx(<4 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -70,8 +70,8 @@ define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -94,8 +94,8 @@ define <8 x i8> @sadd_v8i8_vx(<8 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -106,8 +106,8 @@ define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -130,8 +130,8 @@ define <16 x i8> @sadd_v16i8_vx(<16 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -142,8 +142,8 @@ define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -166,8 +166,8 @@ define <2 x i16> @sadd_v2i16_vx(<2 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -178,8 +178,8 @@ define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -202,8 +202,8 @@ define <4 x i16> @sadd_v4i16_vx(<4 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -214,8 +214,8 @@ define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -238,8 +238,8 @@ define <8 x i16> @sadd_v8i16_vx(<8 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -250,8 +250,8 @@ define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -274,8 +274,8 @@ define <16 x i16> @sadd_v16i16_vx(<16 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -286,8 +286,8 @@ define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -310,8 +310,8 @@ define <2 x i32> @sadd_v2i32_vx(<2 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -322,8 +322,8 @@ define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -346,8 +346,8 @@ define <4 x i32> @sadd_v4i32_vx(<4 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -358,8 +358,8 @@ define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -382,8 +382,8 @@ define <8 x i32> @sadd_v8i32_vx(<8 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -394,8 +394,8 @@ define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -418,8 +418,8 @@ define <16 x i32> @sadd_v16i32_vx(<16 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -430,8 +430,8 @@ define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -467,8 +467,8 @@ define <2 x i64> @sadd_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vsadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -479,8 +479,8 @@ define <2 x i64> @sadd_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -516,8 +516,8 @@ define <4 x i64> @sadd_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vsadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -528,8 +528,8 @@ define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -565,8 +565,8 @@ define <8 x i64> @sadd_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vsadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -577,8 +577,8 @@ define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -614,8 +614,8 @@ define <16 x i64> @sadd_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vsadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }
@@ -626,8 +626,8 @@ define <16 x i64> @sadd_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
index f3a288c4b2aa..df57364d8d41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
@@ -22,8 +22,8 @@ define <2 x i8> @uadd_v2i8_vx(<2 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -34,8 +34,8 @@ define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 8, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 8, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -58,8 +58,8 @@ define <4 x i8> @uadd_v4i8_vx(<4 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -70,8 +70,8 @@ define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 8, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 8, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -94,8 +94,8 @@ define <8 x i8> @uadd_v8i8_vx(<8 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -106,8 +106,8 @@ define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 8, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 8, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -130,8 +130,8 @@ define <16 x i8> @uadd_v16i8_vx(<16 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -142,8 +142,8 @@ define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 8, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 8, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -166,8 +166,8 @@ define <2 x i16> @uadd_v2i16_vx(<2 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -178,8 +178,8 @@ define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 8, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 8, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -202,8 +202,8 @@ define <4 x i16> @uadd_v4i16_vx(<4 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -214,8 +214,8 @@ define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 8, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 8, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -238,8 +238,8 @@ define <8 x i16> @uadd_v8i16_vx(<8 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -250,8 +250,8 @@ define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 8, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 8, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -274,8 +274,8 @@ define <16 x i16> @uadd_v16i16_vx(<16 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -286,8 +286,8 @@ define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 8, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 8, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -310,8 +310,8 @@ define <2 x i32> @uadd_v2i32_vx(<2 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -322,8 +322,8 @@ define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 8, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 8, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -346,8 +346,8 @@ define <4 x i32> @uadd_v4i32_vx(<4 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -358,8 +358,8 @@ define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 8, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 8, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -382,8 +382,8 @@ define <8 x i32> @uadd_v8i32_vx(<8 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -394,8 +394,8 @@ define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 8, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 8, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -418,8 +418,8 @@ define <16 x i32> @uadd_v16i32_vx(<16 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -430,8 +430,8 @@ define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 8, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 8, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -467,8 +467,8 @@ define <2 x i64> @uadd_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vsaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -479,8 +479,8 @@ define <2 x i64> @uadd_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 8, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 8, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -516,8 +516,8 @@ define <4 x i64> @uadd_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vsaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -528,8 +528,8 @@ define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 8, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 8, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -565,8 +565,8 @@ define <8 x i64> @uadd_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vsaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -577,8 +577,8 @@ define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 8, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 8, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -614,8 +614,8 @@ define <16 x i64> @uadd_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vsaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }
@@ -626,8 +626,8 @@ define <16 x i64> @uadd_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 8, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 8, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index fc5a136633b4..8935a68892a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -30,8 +30,8 @@ define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %
 ; CHECK-NEXT:    vse32.v v8, (a3)
 ; CHECK-NEXT:    ret
   %vb = load <8 x i32>, <8 x i32>* %b
-  %ahead = insertelement <8 x i32> undef, i32 %a, i32 0
-  %va = shufflevector <8 x i32> %ahead, <8 x i32> undef, <8 x i32> zeroinitializer
+  %ahead = insertelement <8 x i32> poison, i32 %a, i32 0
+  %va = shufflevector <8 x i32> %ahead, <8 x i32> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, <8 x i1>* %cc
   %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
   store <8 x i32> %vsel, <8 x i32>* %z
@@ -48,8 +48,8 @@ define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <8 x i32>, <8 x i32>* %b
-  %a = insertelement <8 x i32> undef, i32 -1, i32 0
-  %va = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x i32> poison, i32 -1, i32 0
+  %va = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, <8 x i1>* %cc
   %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
   store <8 x i32> %vsel, <8 x i32>* %z
@@ -84,8 +84,8 @@ define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x flo
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <8 x float>, <8 x float>* %b
-  %ahead = insertelement <8 x float> undef, float %a, i32 0
-  %va = shufflevector <8 x float> %ahead, <8 x float> undef, <8 x i32> zeroinitializer
+  %ahead = insertelement <8 x float> poison, float %a, i32 0
+  %va = shufflevector <8 x float> %ahead, <8 x float> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, <8 x i1>* %cc
   %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
   store <8 x float> %vsel, <8 x float>* %z
@@ -102,8 +102,8 @@ define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>*
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <8 x float>, <8 x float>* %b
-  %a = insertelement <8 x float> undef, float 0.0, i32 0
-  %va = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
+  %a = insertelement <8 x float> poison, float 0.0, i32 0
+  %va = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, <8 x i1>* %cc
   %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
   store <8 x float> %vsel, <8 x float>* %z
@@ -138,8 +138,8 @@ define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <
 ; CHECK-NEXT:    vse16.v v8, (a3)
 ; CHECK-NEXT:    ret
   %vb = load <16 x i16>, <16 x i16>* %b
-  %ahead = insertelement <16 x i16> undef, i16 %a, i32 0
-  %va = shufflevector <16 x i16> %ahead, <16 x i16> undef, <16 x i32> zeroinitializer
+  %ahead = insertelement <16 x i16> poison, i16 %a, i32 0
+  %va = shufflevector <16 x i16> %ahead, <16 x i16> poison, <16 x i32> zeroinitializer
   %vcc = load <16 x i1>, <16 x i1>* %cc
   %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
   store <16 x i16> %vsel, <16 x i16>* %z
@@ -156,8 +156,8 @@ define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <16 x i16>, <16 x i16>* %b
-  %a = insertelement <16 x i16> undef, i16 4, i32 0
-  %va = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
+  %a = insertelement <16 x i16> poison, i16 4, i32 0
+  %va = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
   %vcc = load <16 x i1>, <16 x i1>* %cc
   %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
   store <16 x i16> %vsel, <16 x i16>* %z
@@ -194,8 +194,8 @@ define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x h
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <32 x half>, <32 x half>* %b
-  %ahead = insertelement <32 x half> undef, half %a, i32 0
-  %va = shufflevector <32 x half> %ahead, <32 x half> undef, <32 x i32> zeroinitializer
+  %ahead = insertelement <32 x half> poison, half %a, i32 0
+  %va = shufflevector <32 x half> %ahead, <32 x half> poison, <32 x i32> zeroinitializer
   %vcc = load <32 x i1>, <32 x i1>* %cc
   %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
   store <32 x half> %vsel, <32 x half>* %z
@@ -213,8 +213,8 @@ define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <32 x half>, <32 x half>* %b
-  %a = insertelement <32 x half> undef, half 0.0, i32 0
-  %va = shufflevector <32 x half> %a, <32 x half> undef, <32 x i32> zeroinitializer
+  %a = insertelement <32 x half> poison, half 0.0, i32 0
+  %va = shufflevector <32 x half> %a, <32 x half> poison, <32 x i32> zeroinitializer
   %vcc = load <32 x i1>, <32 x i1>* %cc
   %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
   store <32 x half> %vsel, <32 x half>* %z

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
index e9ef8b6971ac..5282c986ac00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
@@ -22,8 +22,8 @@ define <2 x i8> @ssub_v2i8_vx(<2 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -35,8 +35,8 @@ define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -59,8 +59,8 @@ define <4 x i8> @ssub_v4i8_vx(<4 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -72,8 +72,8 @@ define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -96,8 +96,8 @@ define <8 x i8> @ssub_v8i8_vx(<8 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -109,8 +109,8 @@ define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -133,8 +133,8 @@ define <16 x i8> @ssub_v16i8_vx(<16 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -146,8 +146,8 @@ define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -170,8 +170,8 @@ define <2 x i16> @ssub_v2i16_vx(<2 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -183,8 +183,8 @@ define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -207,8 +207,8 @@ define <4 x i16> @ssub_v4i16_vx(<4 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -220,8 +220,8 @@ define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -244,8 +244,8 @@ define <8 x i16> @ssub_v8i16_vx(<8 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -257,8 +257,8 @@ define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -281,8 +281,8 @@ define <16 x i16> @ssub_v16i16_vx(<16 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -294,8 +294,8 @@ define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -318,8 +318,8 @@ define <2 x i32> @ssub_v2i32_vx(<2 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -331,8 +331,8 @@ define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -355,8 +355,8 @@ define <4 x i32> @ssub_v4i32_vx(<4 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -368,8 +368,8 @@ define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -392,8 +392,8 @@ define <8 x i32> @ssub_v8i32_vx(<8 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -405,8 +405,8 @@ define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -429,8 +429,8 @@ define <16 x i32> @ssub_v16i32_vx(<16 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -442,8 +442,8 @@ define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -479,8 +479,8 @@ define <2 x i64> @ssub_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vssub.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -492,8 +492,8 @@ define <2 x i64> @ssub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -529,8 +529,8 @@ define <4 x i64> @ssub_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vssub.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -542,8 +542,8 @@ define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -579,8 +579,8 @@ define <8 x i64> @ssub_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vssub.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -592,8 +592,8 @@ define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -629,8 +629,8 @@ define <16 x i64> @ssub_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vssub.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }
@@ -642,8 +642,8 @@ define <16 x i64> @ssub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
index 1a7cba285437..df49779fc03b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
@@ -22,8 +22,8 @@ define <2 x i8> @usub_v2i8_vx(<2 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -35,8 +35,8 @@ define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> undef, i8 2, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i8> poison, i8 2, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
   %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
   ret <2 x i8> %v
 }
@@ -59,8 +59,8 @@ define <4 x i8> @usub_v4i8_vx(<4 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -72,8 +72,8 @@ define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> undef, i8 2, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i8> poison, i8 2, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
   %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
   ret <4 x i8> %v
 }
@@ -96,8 +96,8 @@ define <8 x i8> @usub_v8i8_vx(<8 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -109,8 +109,8 @@ define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> undef, i8 2, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i8> poison, i8 2, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
   %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
   ret <8 x i8> %v
 }
@@ -133,8 +133,8 @@ define <16 x i8> @usub_v16i8_vx(<16 x i8> %va, i8 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -146,8 +146,8 @@ define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> undef, i8 2, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i8> poison, i8 2, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
   %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
   ret <16 x i8> %v
 }
@@ -170,8 +170,8 @@ define <2 x i16> @usub_v2i16_vx(<2 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -183,8 +183,8 @@ define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> undef, i16 2, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i16> poison, i16 2, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
   %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
   ret <2 x i16> %v
 }
@@ -207,8 +207,8 @@ define <4 x i16> @usub_v4i16_vx(<4 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -220,8 +220,8 @@ define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> undef, i16 2, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i16> poison, i16 2, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
   %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
   ret <4 x i16> %v
 }
@@ -244,8 +244,8 @@ define <8 x i16> @usub_v8i16_vx(<8 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -257,8 +257,8 @@ define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> undef, i16 2, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i16> poison, i16 2, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
   %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
   ret <8 x i16> %v
 }
@@ -281,8 +281,8 @@ define <16 x i16> @usub_v16i16_vx(<16 x i16> %va, i16 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -294,8 +294,8 @@ define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> undef, i16 2, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i16> poison, i16 2, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
   %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
   ret <16 x i16> %v
 }
@@ -318,8 +318,8 @@ define <2 x i32> @usub_v2i32_vx(<2 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -331,8 +331,8 @@ define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> undef, i32 2, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i32> poison, i32 2, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
   %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
   ret <2 x i32> %v
 }
@@ -355,8 +355,8 @@ define <4 x i32> @usub_v4i32_vx(<4 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -368,8 +368,8 @@ define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> undef, i32 2, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i32> poison, i32 2, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
   ret <4 x i32> %v
 }
@@ -392,8 +392,8 @@ define <8 x i32> @usub_v8i32_vx(<8 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -405,8 +405,8 @@ define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> undef, i32 2, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i32> poison, i32 2, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
   %v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
   ret <8 x i32> %v
 }
@@ -429,8 +429,8 @@ define <16 x i32> @usub_v16i32_vx(<16 x i32> %va, i32 %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -442,8 +442,8 @@ define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> undef, i32 2, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i32> poison, i32 2, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
   %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
   ret <16 x i32> %v
 }
@@ -479,8 +479,8 @@ define <2 x i64> @usub_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vssubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -492,8 +492,8 @@ define <2 x i64> @usub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> undef, i64 2, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+  %elt.head = insertelement <2 x i64> poison, i64 2, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
   ret <2 x i64> %v
 }
@@ -529,8 +529,8 @@ define <4 x i64> @usub_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vssubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -542,8 +542,8 @@ define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> undef, i64 2, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+  %elt.head = insertelement <4 x i64> poison, i64 2, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
   ret <4 x i64> %v
 }
@@ -579,8 +579,8 @@ define <8 x i64> @usub_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vssubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -592,8 +592,8 @@ define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> undef, i64 2, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+  %elt.head = insertelement <8 x i64> poison, i64 2, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
   ret <8 x i64> %v
 }
@@ -629,8 +629,8 @@ define <16 x i64> @usub_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vssubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }
@@ -642,8 +642,8 @@ define <16 x i64> @usub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> undef, i64 2, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %elt.head = insertelement <16 x i64> poison, i64 2, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
   ret <16 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
index 86dd1a216cc5..eac549342c2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
@@ -268,8 +268,8 @@ define <2 x i16> @vwmacc_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i8> %a to <2 x i16>
   %e = sext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -285,8 +285,8 @@ define <4 x i16> @vwmacc_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, <4 x i8>* %x
-  %b = insertelement <4 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i8> %a to <4 x i16>
   %e = sext <4 x i8> %c to <4 x i16>
   %f = mul <4 x i16> %d, %e
@@ -302,8 +302,8 @@ define <2 x i32> @vwmacc_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, <2 x i16>* %x
-  %b = insertelement <2 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i16> %a to <2 x i32>
   %e = sext <2 x i16> %c to <2 x i32>
   %f = mul <2 x i32> %d, %e
@@ -319,8 +319,8 @@ define <8 x i16> @vwmacc_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i8> %a to <8 x i16>
   %e = sext <8 x i8> %c to <8 x i16>
   %f = mul <8 x i16> %d, %e
@@ -336,8 +336,8 @@ define <4 x i32> @vwmacc_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
-  %b = insertelement <4 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i16> %a to <4 x i32>
   %e = sext <4 x i16> %c to <4 x i32>
   %f = mul <4 x i32> %d, %e
@@ -353,8 +353,8 @@ define <2 x i64> @vwmacc_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
-  %b = insertelement <2 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i32> %a to <2 x i64>
   %e = sext <2 x i32> %c to <2 x i64>
   %f = mul <2 x i64> %d, %e
@@ -370,8 +370,8 @@ define <16 x i16> @vwmacc_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i8> %a to <16 x i16>
   %e = sext <16 x i8> %c to <16 x i16>
   %f = mul <16 x i16> %d, %e
@@ -387,8 +387,8 @@ define <8 x i32> @vwmacc_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i16> %a to <8 x i32>
   %e = sext <8 x i16> %c to <8 x i32>
   %f = mul <8 x i32> %d, %e
@@ -404,8 +404,8 @@ define <4 x i64> @vwmacc_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i32> %a to <4 x i64>
   %e = sext <4 x i32> %c to <4 x i64>
   %f = mul <4 x i64> %d, %e
@@ -422,8 +422,8 @@ define <32 x i16> @vwmacc_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i8> %a to <32 x i16>
   %e = sext <32 x i8> %c to <32 x i16>
   %f = mul <32 x i16> %d, %e
@@ -439,8 +439,8 @@ define <16 x i32> @vwmacc_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
-  %b = insertelement <16 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i16> %a to <16 x i32>
   %e = sext <16 x i16> %c to <16 x i32>
   %f = mul <16 x i32> %d, %e
@@ -456,8 +456,8 @@ define <8 x i64> @vwmacc_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
-  %b = insertelement <8 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i32> %a to <8 x i64>
   %e = sext <8 x i32> %c to <8 x i64>
   %f = mul <8 x i64> %d, %e
@@ -474,8 +474,8 @@ define <64 x i16> @vwmacc_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = sext <64 x i8> %a to <64 x i16>
   %e = sext <64 x i8> %c to <64 x i16>
   %f = mul <64 x i16> %d, %e
@@ -492,8 +492,8 @@ define <32 x i32> @vwmacc_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
-  %b = insertelement <32 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i16> %a to <32 x i32>
   %e = sext <32 x i16> %c to <32 x i32>
   %f = mul <32 x i32> %d, %e
@@ -509,8 +509,8 @@ define <16 x i64> @vwmacc_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
-  %b = insertelement <16 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i32> %a to <16 x i64>
   %e = sext <16 x i32> %c to <16 x i64>
   %f = mul <16 x i64> %d, %e

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
index 358f7eb7b992..b3e9663b8acb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
@@ -268,8 +268,8 @@ define <2 x i16> @vwmaccu_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i8> %a to <2 x i16>
   %e = zext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -285,8 +285,8 @@ define <4 x i16> @vwmaccu_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, <4 x i8>* %x
-  %b = insertelement <4 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i8> %a to <4 x i16>
   %e = zext <4 x i8> %c to <4 x i16>
   %f = mul <4 x i16> %d, %e
@@ -302,8 +302,8 @@ define <2 x i32> @vwmaccu_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, <2 x i16>* %x
-  %b = insertelement <2 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i16> %a to <2 x i32>
   %e = zext <2 x i16> %c to <2 x i32>
   %f = mul <2 x i32> %d, %e
@@ -319,8 +319,8 @@ define <8 x i16> @vwmaccu_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i8> %a to <8 x i16>
   %e = zext <8 x i8> %c to <8 x i16>
   %f = mul <8 x i16> %d, %e
@@ -336,8 +336,8 @@ define <4 x i32> @vwmaccu_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
-  %b = insertelement <4 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i16> %a to <4 x i32>
   %e = zext <4 x i16> %c to <4 x i32>
   %f = mul <4 x i32> %d, %e
@@ -353,8 +353,8 @@ define <2 x i64> @vwmaccu_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
-  %b = insertelement <2 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i32> %a to <2 x i64>
   %e = zext <2 x i32> %c to <2 x i64>
   %f = mul <2 x i64> %d, %e
@@ -370,8 +370,8 @@ define <16 x i16> @vwmaccu_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i8> %a to <16 x i16>
   %e = zext <16 x i8> %c to <16 x i16>
   %f = mul <16 x i16> %d, %e
@@ -387,8 +387,8 @@ define <8 x i32> @vwmaccu_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i16> %a to <8 x i32>
   %e = zext <8 x i16> %c to <8 x i32>
   %f = mul <8 x i32> %d, %e
@@ -404,8 +404,8 @@ define <4 x i64> @vwmaccu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i32> %a to <4 x i64>
   %e = zext <4 x i32> %c to <4 x i64>
   %f = mul <4 x i64> %d, %e
@@ -422,8 +422,8 @@ define <32 x i16> @vwmaccu_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = zext <32 x i8> %a to <32 x i16>
   %e = zext <32 x i8> %c to <32 x i16>
   %f = mul <32 x i16> %d, %e
@@ -439,8 +439,8 @@ define <16 x i32> @vwmaccu_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
-  %b = insertelement <16 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i16> %a to <16 x i32>
   %e = zext <16 x i16> %c to <16 x i32>
   %f = mul <16 x i32> %d, %e
@@ -456,8 +456,8 @@ define <8 x i64> @vwmaccu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
-  %b = insertelement <8 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i32> %a to <8 x i64>
   %e = zext <8 x i32> %c to <8 x i64>
   %f = mul <8 x i64> %d, %e
@@ -474,8 +474,8 @@ define <64 x i16> @vwmaccu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = zext <64 x i8> %a to <64 x i16>
   %e = zext <64 x i8> %c to <64 x i16>
   %f = mul <64 x i16> %d, %e
@@ -492,8 +492,8 @@ define <32 x i32> @vwmaccu_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
-  %b = insertelement <32 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
   %d = zext <32 x i16> %a to <32 x i32>
   %e = zext <32 x i16> %c to <32 x i32>
   %f = mul <32 x i32> %d, %e
@@ -509,8 +509,8 @@ define <16 x i64> @vwmaccu_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
-  %b = insertelement <16 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i32> %a to <16 x i64>
   %e = zext <16 x i32> %c to <16 x i64>
   %f = mul <16 x i64> %d, %e

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index e7180587f977..c219c36f0d6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -414,8 +414,8 @@ define <2 x i16> @vwmul_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i8> %a to <2 x i16>
   %e = sext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -430,8 +430,8 @@ define <4 x i16> @vwmul_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, <4 x i8>* %x
-  %b = insertelement <4 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i8> %a to <4 x i16>
   %e = sext <4 x i8> %c to <4 x i16>
   %f = mul <4 x i16> %d, %e
@@ -446,8 +446,8 @@ define <2 x i32> @vwmul_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, <2 x i16>* %x
-  %b = insertelement <2 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i16> %a to <2 x i32>
   %e = sext <2 x i16> %c to <2 x i32>
   %f = mul <2 x i32> %d, %e
@@ -462,8 +462,8 @@ define <8 x i16> @vwmul_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i8> %a to <8 x i16>
   %e = sext <8 x i8> %c to <8 x i16>
   %f = mul <8 x i16> %d, %e
@@ -478,8 +478,8 @@ define <4 x i32> @vwmul_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
-  %b = insertelement <4 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i16> %a to <4 x i32>
   %e = sext <4 x i16> %c to <4 x i32>
   %f = mul <4 x i32> %d, %e
@@ -494,8 +494,8 @@ define <2 x i64> @vwmul_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
-  %b = insertelement <2 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i32> %a to <2 x i64>
   %e = sext <2 x i32> %c to <2 x i64>
   %f = mul <2 x i64> %d, %e
@@ -510,8 +510,8 @@ define <16 x i16> @vwmul_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i8> %a to <16 x i16>
   %e = sext <16 x i8> %c to <16 x i16>
   %f = mul <16 x i16> %d, %e
@@ -526,8 +526,8 @@ define <8 x i32> @vwmul_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i16> %a to <8 x i32>
   %e = sext <8 x i16> %c to <8 x i32>
   %f = mul <8 x i32> %d, %e
@@ -542,8 +542,8 @@ define <4 x i64> @vwmul_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i32> %a to <4 x i64>
   %e = sext <4 x i32> %c to <4 x i64>
   %f = mul <4 x i64> %d, %e
@@ -559,8 +559,8 @@ define <32 x i16> @vwmul_vx_v32i16(<32 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i8> %a to <32 x i16>
   %e = sext <32 x i8> %c to <32 x i16>
   %f = mul <32 x i16> %d, %e
@@ -575,8 +575,8 @@ define <16 x i32> @vwmul_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
-  %b = insertelement <16 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i16> %a to <16 x i32>
   %e = sext <16 x i16> %c to <16 x i32>
   %f = mul <16 x i32> %d, %e
@@ -591,8 +591,8 @@ define <8 x i64> @vwmul_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
-  %b = insertelement <8 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i32> %a to <8 x i64>
   %e = sext <8 x i32> %c to <8 x i64>
   %f = mul <8 x i64> %d, %e
@@ -608,8 +608,8 @@ define <64 x i16> @vwmul_vx_v64i16(<64 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = sext <64 x i8> %a to <64 x i16>
   %e = sext <64 x i8> %c to <64 x i16>
   %f = mul <64 x i16> %d, %e
@@ -625,8 +625,8 @@ define <32 x i32> @vwmul_vx_v32i32(<32 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
-  %b = insertelement <32 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i16> %a to <32 x i32>
   %e = sext <32 x i16> %c to <32 x i32>
   %f = mul <32 x i32> %d, %e
@@ -641,8 +641,8 @@ define <16 x i64> @vwmul_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmul.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
-  %b = insertelement <16 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i32> %a to <16 x i64>
   %e = sext <16 x i32> %c to <16 x i64>
   %f = mul <16 x i64> %d, %e
@@ -660,8 +660,8 @@ define <8 x i16> @vwmul_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load i8, i8* %y
   %c = sext i8 %b to i16
-  %d = insertelement <8 x i16> undef, i16 %c, i32 0
-  %e = shufflevector <8 x i16> %d, <8 x i16> undef, <8 x i32> zeroinitializer
+  %d = insertelement <8 x i16> poison, i16 %c, i32 0
+  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
   %f = sext <8 x i8> %a to <8 x i16>
   %g = mul <8 x i16> %e, %f
   ret <8 x i16> %g
@@ -679,8 +679,8 @@ define <8 x i16> @vwmul_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load i16, i16* %y
-  %d = insertelement <8 x i16> undef, i16 %b, i32 0
-  %e = shufflevector <8 x i16> %d, <8 x i16> undef, <8 x i32> zeroinitializer
+  %d = insertelement <8 x i16> poison, i16 %b, i32 0
+  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
   %f = sext <8 x i8> %a to <8 x i16>
   %g = mul <8 x i16> %e, %f
   ret <8 x i16> %g
@@ -697,8 +697,8 @@ define <4 x i32> @vwmul_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i8, i8* %y
   %c = sext i8 %b to i32
-  %d = insertelement <4 x i32> undef, i32 %c, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %c, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = sext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -715,8 +715,8 @@ define <4 x i32> @vwmul_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i16, i16* %y
   %c = sext i16 %b to i32
-  %d = insertelement <4 x i32> undef, i32 %c, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %c, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = sext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -734,8 +734,8 @@ define <4 x i32> @vwmul_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i32, i32* %y
-  %d = insertelement <4 x i32> undef, i32 %b, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %b, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = sext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -770,8 +770,8 @@ define <2 x i64> @vwmul_vx_v2i64_i8(<2 x i32>* %x, i8* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i8, i8* %y
   %c = sext i8 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = sext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -806,8 +806,8 @@ define <2 x i64> @vwmul_vx_v2i64_i16(<2 x i32>* %x, i16* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i16, i16* %y
   %c = sext i16 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = sext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -842,8 +842,8 @@ define <2 x i64> @vwmul_vx_v2i64_i32(<2 x i32>* %x, i32* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i32, i32* %y
   %c = sext i32 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = sext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -879,8 +879,8 @@ define <2 x i64> @vwmul_vx_v2i64_i64(<2 x i32>* %x, i64* %y) {
 ; RV64-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i64, i64* %y
-  %d = insertelement <2 x i64> undef, i64 %b, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %b, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = sext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index a083436386ab..6c204b24ae2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -430,8 +430,8 @@ define <2 x i16> @vwmulsu_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i8> %a to <2 x i16>
   %e = zext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -447,8 +447,8 @@ define <2 x i16> @vwmulsu_vx_v2i16_swap(<2 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v9
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i8> %a to <2 x i16>
   %e = sext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -463,8 +463,8 @@ define <4 x i16> @vwmulsu_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, <4 x i8>* %x
-  %b = insertelement <4 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i8> %a to <4 x i16>
   %e = zext <4 x i8> %c to <4 x i16>
   %f = mul <4 x i16> %d, %e
@@ -479,8 +479,8 @@ define <2 x i32> @vwmulsu_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, <2 x i16>* %x
-  %b = insertelement <2 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i16> %a to <2 x i32>
   %e = zext <2 x i16> %c to <2 x i32>
   %f = mul <2 x i32> %d, %e
@@ -495,8 +495,8 @@ define <8 x i16> @vwmulsu_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i8> %a to <8 x i16>
   %e = zext <8 x i8> %c to <8 x i16>
   %f = mul <8 x i16> %d, %e
@@ -511,8 +511,8 @@ define <4 x i32> @vwmulsu_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
-  %b = insertelement <4 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i16> %a to <4 x i32>
   %e = zext <4 x i16> %c to <4 x i32>
   %f = mul <4 x i32> %d, %e
@@ -527,8 +527,8 @@ define <2 x i64> @vwmulsu_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
-  %b = insertelement <2 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
   %d = sext <2 x i32> %a to <2 x i64>
   %e = zext <2 x i32> %c to <2 x i64>
   %f = mul <2 x i64> %d, %e
@@ -543,8 +543,8 @@ define <16 x i16> @vwmulsu_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i8> %a to <16 x i16>
   %e = zext <16 x i8> %c to <16 x i16>
   %f = mul <16 x i16> %d, %e
@@ -559,8 +559,8 @@ define <8 x i32> @vwmulsu_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i16> %a to <8 x i32>
   %e = zext <8 x i16> %c to <8 x i32>
   %f = mul <8 x i32> %d, %e
@@ -575,8 +575,8 @@ define <4 x i64> @vwmulsu_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = sext <4 x i32> %a to <4 x i64>
   %e = zext <4 x i32> %c to <4 x i64>
   %f = mul <4 x i64> %d, %e
@@ -592,8 +592,8 @@ define <32 x i16> @vwmulsu_vx_v32i16(<32 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i8> %a to <32 x i16>
   %e = zext <32 x i8> %c to <32 x i16>
   %f = mul <32 x i16> %d, %e
@@ -608,8 +608,8 @@ define <16 x i32> @vwmulsu_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
-  %b = insertelement <16 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i16> %a to <16 x i32>
   %e = zext <16 x i16> %c to <16 x i32>
   %f = mul <16 x i32> %d, %e
@@ -624,8 +624,8 @@ define <8 x i64> @vwmulsu_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
-  %b = insertelement <8 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
   %d = sext <8 x i32> %a to <8 x i64>
   %e = zext <8 x i32> %c to <8 x i64>
   %f = mul <8 x i64> %d, %e
@@ -641,8 +641,8 @@ define <64 x i16> @vwmulsu_vx_v64i16(<64 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = sext <64 x i8> %a to <64 x i16>
   %e = zext <64 x i8> %c to <64 x i16>
   %f = mul <64 x i16> %d, %e
@@ -658,8 +658,8 @@ define <32 x i32> @vwmulsu_vx_v32i32(<32 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
-  %b = insertelement <32 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
   %d = sext <32 x i16> %a to <32 x i32>
   %e = zext <32 x i16> %c to <32 x i32>
   %f = mul <32 x i32> %d, %e
@@ -674,8 +674,8 @@ define <16 x i64> @vwmulsu_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
-  %b = insertelement <16 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
   %d = sext <16 x i32> %a to <16 x i64>
   %e = zext <16 x i32> %c to <16 x i64>
   %f = mul <16 x i64> %d, %e

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index 8e9a6598acd1..1351adec9523 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -414,8 +414,8 @@ define <2 x i16> @vwmulu_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, <2 x i8>* %x
-  %b = insertelement <2 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i8> %a to <2 x i16>
   %e = zext <2 x i8> %c to <2 x i16>
   %f = mul <2 x i16> %d, %e
@@ -430,8 +430,8 @@ define <4 x i16> @vwmulu_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, <4 x i8>* %x
-  %b = insertelement <4 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i8> %a to <4 x i16>
   %e = zext <4 x i8> %c to <4 x i16>
   %f = mul <4 x i16> %d, %e
@@ -446,8 +446,8 @@ define <2 x i32> @vwmulu_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, <2 x i16>* %x
-  %b = insertelement <2 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i16> %a to <2 x i32>
   %e = zext <2 x i16> %c to <2 x i32>
   %f = mul <2 x i32> %d, %e
@@ -462,8 +462,8 @@ define <8 x i16> @vwmulu_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
-  %b = insertelement <8 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i8> %a to <8 x i16>
   %e = zext <8 x i8> %c to <8 x i16>
   %f = mul <8 x i16> %d, %e
@@ -478,8 +478,8 @@ define <4 x i32> @vwmulu_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
-  %b = insertelement <4 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i16> %a to <4 x i32>
   %e = zext <4 x i16> %c to <4 x i32>
   %f = mul <4 x i32> %d, %e
@@ -494,8 +494,8 @@ define <2 x i64> @vwmulu_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
-  %b = insertelement <2 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
   %d = zext <2 x i32> %a to <2 x i64>
   %e = zext <2 x i32> %c to <2 x i64>
   %f = mul <2 x i64> %d, %e
@@ -510,8 +510,8 @@ define <16 x i16> @vwmulu_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
-  %b = insertelement <16 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i8> %a to <16 x i16>
   %e = zext <16 x i8> %c to <16 x i16>
   %f = mul <16 x i16> %d, %e
@@ -526,8 +526,8 @@ define <8 x i32> @vwmulu_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
-  %b = insertelement <8 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i16> %a to <8 x i32>
   %e = zext <8 x i16> %c to <8 x i32>
   %f = mul <8 x i32> %d, %e
@@ -542,8 +542,8 @@ define <4 x i64> @vwmulu_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
-  %b = insertelement <4 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
   %d = zext <4 x i32> %a to <4 x i64>
   %e = zext <4 x i32> %c to <4 x i64>
   %f = mul <4 x i64> %d, %e
@@ -559,8 +559,8 @@ define <32 x i16> @vwmulu_vx_v32i16(<32 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
-  %b = insertelement <32 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
   %d = zext <32 x i8> %a to <32 x i16>
   %e = zext <32 x i8> %c to <32 x i16>
   %f = mul <32 x i16> %d, %e
@@ -575,8 +575,8 @@ define <16 x i32> @vwmulu_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
-  %b = insertelement <16 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i16> %a to <16 x i32>
   %e = zext <16 x i16> %c to <16 x i32>
   %f = mul <16 x i32> %d, %e
@@ -591,8 +591,8 @@ define <8 x i64> @vwmulu_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
-  %b = insertelement <8 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
   %d = zext <8 x i32> %a to <8 x i64>
   %e = zext <8 x i32> %c to <8 x i64>
   %f = mul <8 x i64> %d, %e
@@ -608,8 +608,8 @@ define <64 x i16> @vwmulu_vx_v64i16(<64 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
-  %b = insertelement <64 x i8> undef, i8 %y, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
   %d = zext <64 x i8> %a to <64 x i16>
   %e = zext <64 x i8> %c to <64 x i16>
   %f = mul <64 x i16> %d, %e
@@ -625,8 +625,8 @@ define <32 x i32> @vwmulu_vx_v32i32(<32 x i16>* %x, i16 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
-  %b = insertelement <32 x i16> undef, i16 %y, i32 0
-  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
   %d = zext <32 x i16> %a to <32 x i32>
   %e = zext <32 x i16> %c to <32 x i32>
   %f = mul <32 x i32> %d, %e
@@ -641,8 +641,8 @@ define <16 x i64> @vwmulu_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
-  %b = insertelement <16 x i32> undef, i32 %y, i64 0
-  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
   %d = zext <16 x i32> %a to <16 x i64>
   %e = zext <16 x i32> %c to <16 x i64>
   %f = mul <16 x i64> %d, %e
@@ -660,8 +660,8 @@ define <8 x i16> @vwmulu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load i8, i8* %y
   %c = zext i8 %b to i16
-  %d = insertelement <8 x i16> undef, i16 %c, i32 0
-  %e = shufflevector <8 x i16> %d, <8 x i16> undef, <8 x i32> zeroinitializer
+  %d = insertelement <8 x i16> poison, i16 %c, i32 0
+  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
   %f = zext <8 x i8> %a to <8 x i16>
   %g = mul <8 x i16> %e, %f
   ret <8 x i16> %g
@@ -679,8 +679,8 @@ define <8 x i16> @vwmulu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load i16, i16* %y
-  %d = insertelement <8 x i16> undef, i16 %b, i32 0
-  %e = shufflevector <8 x i16> %d, <8 x i16> undef, <8 x i32> zeroinitializer
+  %d = insertelement <8 x i16> poison, i16 %b, i32 0
+  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
   %f = zext <8 x i8> %a to <8 x i16>
   %g = mul <8 x i16> %e, %f
   ret <8 x i16> %g
@@ -697,8 +697,8 @@ define <4 x i32> @vwmulu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i8, i8* %y
   %c = zext i8 %b to i32
-  %d = insertelement <4 x i32> undef, i32 %c, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %c, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = zext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -715,8 +715,8 @@ define <4 x i32> @vwmulu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i16, i16* %y
   %c = zext i16 %b to i32
-  %d = insertelement <4 x i32> undef, i32 %c, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %c, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = zext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -734,8 +734,8 @@ define <4 x i32> @vwmulu_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x
   %b = load i32, i32* %y
-  %d = insertelement <4 x i32> undef, i32 %b, i32 0
-  %e = shufflevector <4 x i32> %d, <4 x i32> undef, <4 x i32> zeroinitializer
+  %d = insertelement <4 x i32> poison, i32 %b, i32 0
+  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
   %f = zext <4 x i16> %a to <4 x i32>
   %g = mul <4 x i32> %e, %f
   ret <4 x i32> %g
@@ -772,8 +772,8 @@ define <2 x i64> @vwmulu_vx_v2i64_i8(<2 x i32>* %x, i8* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i8, i8* %y
   %c = zext i8 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = zext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -810,8 +810,8 @@ define <2 x i64> @vwmulu_vx_v2i64_i16(<2 x i32>* %x, i16* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i16, i16* %y
   %c = zext i16 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = zext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -848,8 +848,8 @@ define <2 x i64> @vwmulu_vx_v2i64_i32(<2 x i32>* %x, i32* %y) {
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i32, i32* %y
   %c = zext i32 %b to i64
-  %d = insertelement <2 x i64> undef, i64 %c, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %c, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = zext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g
@@ -885,8 +885,8 @@ define <2 x i64> @vwmulu_vx_v2i64_i64(<2 x i32>* %x, i64* %y) {
 ; RV64-NEXT:    ret
   %a = load <2 x i32>, <2 x i32>* %x
   %b = load i64, i64* %y
-  %d = insertelement <2 x i64> undef, i64 %b, i64 0
-  %e = shufflevector <2 x i64> %d, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = insertelement <2 x i64> poison, i64 %b, i64 0
+  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
   %f = zext <2 x i32> %a to <2 x i64>
   %g = mul <2 x i64> %e, %f
   ret <2 x i64> %g

